ceph安裝問題

ceph-deploy安裝

 

Yum priorities plugin
Loaded plugins: fastestmirror

Loaded plugins: fastestmirror, priorities


# Enable the EPEL repository (provides ceph-deploy's Python dependencies on CentOS 7).
# Fix: the original URL had scraped junk ("jquery") fused onto ".rpm", so yum could not resolve it.
yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm

# Write the Ceph Mimic repo definition for CentOS 7.
# Fix: the closing heredoc delimiter must be exactly "EOM" on its own line;
# the original "EOMlinux" never terminated the heredoc, so the shell would
# keep consuming subsequent lines as repo-file content.
cat << EOM > /etc/yum.repos.d/ceph.repo
[ceph-noarch]
name=Ceph noarch packages
baseurl=https://download.ceph.com/rpm-mimic/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
EOM

# Install ceph-deploy plus NTP so all cluster nodes keep synchronized clocks.
# Fix: the original had scraped junk ("redis") fused onto the -y flag.
yum install ceph-deploy ntp ntpdate ntp-doc -y

# Generate an SSH key pair, then push the public key to every cluster node
# so ceph-deploy can log in without a password.
ssh-keygen
# Run the following one at a time — each prompts for the remote password.
ssh-copy-id ceph1
ssh-copy-id ceph2
# Fix: the original had scraped junk ("apache") fused onto the hostname.
ssh-copy-id ceph3

# Make the node names resolvable, then disable the firewall and SELinux
# (standard preflight for a lab ceph-deploy install).
echo "192.168.7.151 ceph1" >> /etc/hosts
echo "192.168.7.152 ceph2" >> /etc/hosts
echo "192.168.7.153 ceph3" >> /etc/hosts
systemctl stop firewalld
systemctl disable firewalld
# Persistently disable SELinux, and also turn it off for the running session.
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0
# Verify the change took effect.
# Fix: the original path had scraped junk ("json") fused onto "selinux".
cat /etc/sysconfig/selinux

##################################################

# Create the dedicated deploy user with passwordless sudo (required by ceph-deploy).
useradd sceph
# Fix: the original had scraped junk ("bootstrap") fused onto the username.
passwd sceph

echo "sceph ALL = (root) NOPASSWD:ALL" | tee /etc/sudoers.d/sceph
chmod 0440 /etc/sudoers.d/sceph

# Temporarily make the file writable so the !requiretty override can be added,
# then restore the 0440 mode that sudo expects.
chmod u+w /etc/sudoers.d/sceph
vi /etc/sudoers.d/sceph
# Add this line inside the editor (lets ceph-deploy run sudo without a TTY):
Defaults:sceph !requiretty
chmod u-w /etc/sudoers.d/sceph

 

# Bootstrap pip for the system Python 2 and confirm it is on PATH.
wget https://bootstrap.pypa.io/get-pip.py
python get-pip.py
pip -V


安裝這個就好了
# Simpler alternative to get-pip.py: install pip from the EPEL repo.
yum -y install python2-pip


# Working directory where ceph-deploy writes ceph.conf, keyrings and its log.
mkdir my-cluster
cd my-cluster

##################################################
# Create a new cluster with ceph1 as the initial monitor,
# then install the ceph packages onto all three nodes.
ceph-deploy new ceph1
ceph-deploy install ceph1 ceph2 ceph3


就這5個關鍵包
epel-release
yum-plugin-priorities
ceph-release
ceph.x86_64 2:13.2.5-0.el7
ceph-radosgw.x86_64 2:13.2.5-0.el7
# Manual equivalent of `ceph-deploy install`: pulls in the packages listed above.
sudo yum -y install ceph ceph-radosgw

# Mount CephFS with the kernel client, authenticating as client.admin
# using the key stored in /etc/ceph/admin.secret.
mount -t ceph 192.168.7.101:6789:/ /mnt/mycephfs1 -o name=admin,secretfile=/etc/ceph/admin.secret

AQAY9JJcbtuaExAA2wVIqz6w5KrEiOA1S3JIMA==

# Deploy the initial monitor(s) and gather the cluster keyrings into the working dir.
ceph-deploy mon create-initial

 

 =============================================================================

 

使用Ceph RBD爲Kubernetes集羣提供存儲卷
集成過程依舊少不了「趟坑」,
Third party cloud provisioning platforms such as OpenStack, CloudStack, OpenNebula, ProxMox, etc.

===================================================
[sceph@ceph1 ~]$ ceph-deploy new ceph1
[ceph_deploy.conf][DEBUG ] found configuration file at: /home/sceph/.cephdeploy.conf
[ceph_deploy.cli][INFO ] Invoked (2.0.1): /usr/bin/ceph-deploy new ceph1
[ceph_deploy.cli][INFO ] ceph-deploy options:
[ceph_deploy.cli][INFO ] username : None
[ceph_deploy.cli][INFO ] func : <function new at 0x7faf668ec320>
[ceph_deploy.cli][INFO ] verbose : False
[ceph_deploy.cli][INFO ] overwrite_conf : False
[ceph_deploy.cli][INFO ] quiet : False
[ceph_deploy.cli][INFO ] cd_conf : <ceph_deploy.conf.cephdeploy.Conf instance at 0x7faf66061830>
[ceph_deploy.cli][INFO ] cluster : ceph
[ceph_deploy.cli][INFO ] ssh_copykey : True
[ceph_deploy.cli][INFO ] mon : ['ceph1']
[ceph_deploy.cli][INFO ] public_network : None
[ceph_deploy.cli][INFO ] ceph_conf : None
[ceph_deploy.cli][INFO ] cluster_network : None
[ceph_deploy.cli][INFO ] default_release : False
[ceph_deploy.cli][INFO ] fsid : None
[ceph_deploy.new][DEBUG ] Creating new cluster named ceph
[ceph_deploy.new][INFO ] making sure passwordless SSH succeeds
[ceph1][DEBUG ] connection detected need for sudo
[ceph1][DEBUG ] connected to host: ceph1
[ceph1][DEBUG ] detect platform information from remote host
[ceph1][DEBUG ] detect machine type
[ceph1][DEBUG ] find the location of an executable
[ceph1][INFO ] Running command: sudo /usr/sbin/ip link show
[ceph1][INFO ] Running command: sudo /usr/sbin/ip addr show
[ceph1][DEBUG ] IP addresses found: [u'192.168.7.151']
[ceph_deploy.new][DEBUG ] Resolving host ceph1
[ceph_deploy.new][DEBUG ] Monitor ceph1 at 192.168.7.151
[ceph_deploy.new][DEBUG ] Monitor initial members are ['ceph1']
[ceph_deploy.new][DEBUG ] Monitor addrs are ['192.168.7.151']
[ceph_deploy.new][DEBUG ] Creating a random mon key...
[ceph_deploy.new][DEBUG ] Writing monitor keyring to ceph.mon.keyring...
[ceph_deploy.new][DEBUG ] Writing initial config to ceph.conf...
[sceph@ceph1 ~]$ ls
ceph.conf ceph-deploy-ceph.log ceph.mon.keyring my-cluster


=========================================================

問題1

[ceph1][DEBUG ] Installed:
[ceph1][DEBUG ] ceph-release.noarch 0:1-1.el7
[ceph1][DEBUG ]
[ceph1][DEBUG ] Complete!
[ceph1][WARNIN] ensuring that /etc/yum.repos.d/ceph.repo contains a high priority
[ceph_deploy][ERROR ] RuntimeError: NoSectionError: No section: 'ceph'


解決
yum remove ceph-release
[root@ceph3 ~]# cd /etc/yum.repos.d/
[root@ceph3 yum.repos.d]# ll
total 48
-rw-r--r--. 1 root root 1664 Nov 23 21:16 CentOS-Base.repo
-rw-r--r--. 1 root root 1309 Nov 23 21:16 CentOS-CR.repo
-rw-r--r--. 1 root root 649 Nov 23 21:16 CentOS-Debuginfo.repo
-rw-r--r--. 1 root root 314 Nov 23 21:16 CentOS-fasttrack.repo
-rw-r--r--. 1 root root 630 Nov 23 21:16 CentOS-Media.repo
-rw-r--r--. 1 root root 1331 Nov 23 21:16 CentOS-Sources.repo
-rw-r--r--. 1 root root 5701 Nov 23 21:16 CentOS-Vault.repo
-rw-r--r-- 1 root root 535 May 5 2018 ceph.repo.rpmnew
-rw-r--r--. 1 root root 178 Mar 20 17:34 ceph.repo.rpmsave
-rw-r--r--. 1 root root 951 Oct 3 2017 epel.repo
-rw-r--r--. 1 root root 1050 Oct 3 2017 epel-testing.repo
[root@ceph3 yum.repos.d]# rm -rf ceph.repo.rpm*


========================================================

問題2

[ceph_deploy.install][DEBUG ] Detecting platform for host ceph2 ...
[ceph2][DEBUG ] connection detected need for sudo

We trust you have received the usual lecture from the local System
Administrator. It usually boils down to these three things:

#1) Respect the privacy of others.
#2) Think before you type.
#3) With great power comes great responsibility.

sudo: no tty present and no askpass program specified
[ceph_deploy][ERROR ] RuntimeError: connecting to host: ceph2 resulted in errors: IOError cannot send (already closed?)

 

解決
useradd sceph
passwd sceph

echo "sceph ALL = (root) NOPASSWD:ALL" | tee /etc/sudoers.d/sceph
chmod 0440 /etc/sudoers.d/sceph

chmod u+w /etc/sudoers.d/sceph
vi /etc/sudoers.d/sceph
Defaults:sceph !requiretty
chmod u-w /etc/sudoers.d/sceph


========================================================
問題3
[sceph@ceph1 ~]$ ceph -s
2019-03-21 17:44:54.373 7fee143b6700 -1 auth: unable to find a keyring on /etc/ceph/ceph.client.admin.keyring,/etc/ceph/ceph.keyring,/etc/ceph/keyring,/etc/ceph/keyring.bin,: (2) No such file or directory
2019-03-21 17:44:54.373 7fee143b6700 -1 monclient: ERROR: missing keyring, cannot use cephx for authentication
[errno 2] error connecting to the cluster

解決
要用root用戶運行
sudo ceph -s


========================================================
問題4
[root@ceph1 ceph]# ceph -s
cluster:
id: d45b9e18-518c-4d01-bb41-b341c576e3c0
health: HEALTH_WARN
too few PGs per OSD (8 < min 30)

解決
8個pgs
3個osd
3副本

每 OSD 的 PG 數 = pg_num × replicas / osd 數
8 × 3 / 3 = 8

即每一個 osd 上均分了 8×3/3 = 8 個 pgs


=================================================
調優參數查看,在dashboard中的Cluster>Configuration Documentation中查看
osd_memory_target
mon_osd_cache_size
mds_cache_size

緩存盤的使用,在日誌上
osd_journal=/var/lib/ceph/osd/$cluster-$id/journal
數據盤的使用,在objects上


bluestore
filestore這個已過期

========================================================

yum-plugin-priorities

yum源優先級的一個文件。
是yum-plugin-priorities這個插件的一個文件。
用來給yum源分優先級的。
好比你在centos下有centos,epel,rpmfusion三個yum源。
三個yum源中可能含有相同的軟件,補丁之類的東西。
yum管理器爲了分辨更新系統或者安裝軟件的時候用那個yum源的軟件因此纔有這麼個東西。
若是說,設置centos官方的yum源優先級最高,epelyum源第二,rpmfusion第三。(用1到99來表示,1最高)
那在安裝程序的時候,先尋找centos的yum源,若是源裏面有要的程序,那就中止尋找,直接安裝找到的,若是沒有找到,就依次尋找epel和rpmfusion的源。
若是說三個yum源都含有同一個軟件,那就安裝優先級最高的yum源的。

 

========================================================


# Query the monitor through its local admin socket — works on the mon host
# even when no client.admin keyring is present.
sudo ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.ceph1.asok mon_status

sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --admin-daemon=/var/run/ceph/ceph-mon.ceph1.asok mon_status
# Fetch the admin and bootstrap keys directly from the monitor's own keyring
# (this is what `ceph-deploy gatherkeys` runs under the hood).
sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph1/keyring auth get client.admin
sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph1/keyring auth get client.bootstrap-mds
sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph1/keyring auth get client.bootstrap-mgr
sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph1/keyring auth get client.bootstrap-osd
sudo /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. --keyring=/var/lib/ceph/mon/ceph-ceph1/keyring auth get client.bootstrap-rgw

 

 

========================================================

debian,pve,ceph獨立安裝

沒有成功,在於pve的ceph版本問題,pve5.4對應ceph luminous


配置源的方式
https://download.ceph.com/debian-nautilus/ stretch main
http://mirrors.163.com/ceph/debian-nautilus/ stretch main
'deb https://download.ceph.com/debian-luminous/ {codename} main'

# Import the Ceph release signing key (official site, or the 163 mirror when
# download.ceph.com is slow from China), then install ceph plus the MDS.
wget -q -O- 'https://download.ceph.com/keys/release.asc' | sudo apt-key add -
wget -q -O- 'http://mirrors.163.com/ceph/keys/release.asc' | sudo apt-key add -
sudo apt-get update && sudo apt-get install ceph ceph-mds


# Same steps when already running as root (no sudo).
wget -O- 'http://mirrors.163.com/ceph/keys/release.asc' | apt-key add -
apt-get update && apt-get install ceph ceph-mds


主動下載的方式
If you are attempting to install behind a firewall in an environment without internet access,

wget -q https://download.ceph.com/debian-{release}/pool/main/c/ceph/ceph_{version}{distro}_{arch}.deb

sudo apt-get update && sudo apt-get install ceph-deploy


apt-get install ceph-mds
ceph-mds
ceph-fs-common


apt-get install ceph

The following additional packages will be installed:
binutils ceph-base ceph-mon ceph-osd cryptsetup-bin javascript-common libjs-jquery libleveldb1v5 libopts25 libparted2 ntp parted python-blinker python-click python-colorama python-flask python-itsdangerous python-jinja2
python-markupsafe python-pyinotify python-simplejson python-werkzeug uuid-runtime xfsprogs
Suggested packages:
binutils-doc ceph-mds apache2 | lighttpd | httpd libparted-dev libparted-i18n ntp-doc parted-doc python-blinker-doc python-flask-doc python-jinja2-doc python-pyinotify-doc ipython python-genshi python-lxml python-greenlet
python-redis python-pylibmc | python-memcache python-werkzeug-doc xfsdump acl quota
The following NEW packages will be installed:
binutils ceph ceph-base ceph-mon ceph-osd cryptsetup-bin javascript-common libjs-jquery libleveldb1v5 libopts25 libparted2 ntp parted python-blinker python-click python-colorama python-flask python-itsdangerous python-jinja2
python-markupsafe python-pyinotify python-simplejson python-werkzeug uuid-runtime xfsprogs
0 upgraded, 25 newly installed, 0 to remove and 8 not upgraded.
Need to get 28.4 MB of archives.
After this operation, 115 MB of additional disk space will be used.

 


root@d1:/etc/apt# dpkg -l|grep ceph
ii ceph 10.2.11-2 amd64 distributed storage and file system
ii ceph-base 10.2.11-2 amd64 common ceph daemon libraries and management tools
ii ceph-common 10.2.11-2 amd64 common utilities to mount and interact with a ceph storage cluster
ii ceph-fs-common 10.2.11-2 amd64 common utilities to mount and interact with a ceph file system
ii ceph-fuse 10.2.11-2 amd64 FUSE-based client for the Ceph distributed file system
ii ceph-mds 10.2.11-2 amd64 metadata server for the ceph distributed file system
ii ceph-mon 10.2.11-2 amd64 monitor server for the ceph storage system
ii ceph-osd 10.2.11-2 amd64 OSD server for the ceph storage system
ii libcephfs1 10.2.11-2 amd64 Ceph distributed file system client library
ii python-cephfs 10.2.11-2 amd64 Python libraries for the Ceph libcephfs library

 

 


root@d1:~# dpkg -l|wc -l
595
root@d1:~# dpkg -l|grep ceph
ii ceph-common 10.2.11-2 amd64 common utilities to mount and interact with a ceph storage cluster
ii ceph-fuse 10.2.11-2 amd64 FUSE-based client for the Ceph distributed file system
ii libcephfs1 10.2.11-2 amd64 Ceph distributed file system client library
ii python-cephfs 10.2.11-2 amd64 Python libraries for the Ceph libcephfs library

 

 

 

# Grant user c1 full sudo rights. Add this line inside visudo:
# Fix: sudoers user-spec syntax requires '=' between the host list and the
# runas list — the original "c1 ALL(ALL) ALL" is a syntax error visudo rejects.
visudo
c1 ALL=(ALL) ALL

# Manual (no ceph-deploy) monitor bootstrap — the [global] config below goes
# into /etc/ceph/ceph.conf first.
sudo vi /etc/ceph/ceph.conf
# Generate the fsid used in ceph.conf.
uuidgen

# 1. Create the monitor secret keyring.
ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'
# 2. Create the client.admin keyring with full capabilities.
sudo ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
# 3. Create the OSD bootstrap keyring.
sudo ceph-authtool --create-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd'
# 4. Merge both keyrings into the monitor keyring.
sudo ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring
sudo ceph-authtool /tmp/ceph.mon.keyring --import-keyring /var/lib/ceph/bootstrap-osd/ceph.keyring
# 5. Build the initial monitor map (hostname d1, its IP, and the fsid from uuidgen).
monmaptool --create --add d1 192.168.8.2 --fsid 0f0baa68-0787-4b32-a3c3-e46b6e50c5f6 /tmp/monmap
# 6. Create and initialize the monitor data directory as the ceph user.
sudo mkdir /var/lib/ceph/mon/ceph-d1
sudo -u ceph ceph-mon --mkfs -i d1 --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring


# /etc/ceph/ceph.conf for the manual bootstrap above.
[global]
fsid = 0f0baa68-0787-4b32-a3c3-e46b6e50c5f6
mon initial members = d1
mon host = 192.168.8.2
public network = 192.168.8.0/24
# Require cephx authentication for all traffic.
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
# 3 replicas; allow I/O with as few as 2 available.
osd pool default size = 3
osd pool default min size = 2
osd pool default pg num = 333
osd pool default pgp num = 333
# CRUSH failure domain: 1 = host.
osd crush chooseleaf type = 1

# Start the monitor via systemd...
sudo systemctl start ceph-mon@d1

# ...or via the legacy sysvinit wrapper.
sudo /etc/init.d/ceph start mon.d1

# Verify cluster status.
ceph -s

相關文章
相關標籤/搜索