
root@compute09:~# ceph-deploy osd activate compute09:/dev/sdc3
[ceph_deploy.conf][DEBUG ] found configuration file at: /root/.cephdeploy.conf
[ceph_deploy.cli][INFO  ] Invoked (1.5.20): /usr/bin/ceph-deploy osd activate compute09:/dev/sdc3
[ceph_deploy.osd][DEBUG ] Activating cluster ceph disks compute09:/dev/sdc3:
[compute09][DEBUG ] connected to host: compute09 
[compute09][DEBUG ] detect platform information from remote host
[compute09][DEBUG ] detect machine type
[ceph_deploy.osd][INFO  ] Distro info: Ubuntu 14.04 trusty
[ceph_deploy.osd][DEBUG ] activating host compute09 disk /dev/sdc3
[ceph_deploy.osd][DEBUG ] will use init type: upstart
[compute09][INFO  ] Running command: ceph-disk -v activate --mark-init upstart --mount /dev/sdc3
[compute09][WARNIN] INFO:ceph-disk:Running command: /sbin/blkid -p -s TYPE -ovalue -- /dev/sdc3
[compute09][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-conf --cluster=ceph --name=osd. --lookup osd_mount_options_xfs
[compute09][WARNIN] DEBUG:ceph-disk:Mounting /dev/sdc3 on /var/lib/ceph/tmp/mnt.B1FqSF with options rw,relatime,inode64,logbsize=256k,delaylog,allocsize=4M
[compute09][WARNIN] INFO:ceph-disk:Running command: /bin/mount -t xfs -o rw,relatime,inode64,logbsize=256k,delaylog,allocsize=4M -- /dev/sdc3 /var/lib/ceph/tmp/mnt.B1FqSF
[compute09][WARNIN] DEBUG:ceph-disk:Cluster uuid is 89888257-3ea9-455e-b80d-1b77082ef5e7
[compute09][WARNIN] INFO:ceph-disk:Running command: /usr/bin/ceph-osd --cluster=ceph --show-config-value=fsid
[compute09][WARNIN] DEBUG:ceph-disk:Cluster name is ceph
[compute09][WARNIN] DEBUG:ceph-disk:OSD uuid is 1912fa9a-d0f9-4cb9-b671-f04c692e010a
[compute09][WARNIN] DEBUG:ceph-disk:OSD id is 28
[compute09][WARNIN] DEBUG:ceph-disk:Marking with init system upstart
[compute09][WARNIN] DEBUG:ceph-disk:ceph osd.28 data dir is ready at /var/lib/ceph/tmp/mnt.B1FqSF
[compute09][WARNIN] INFO:ceph-disk:ceph osd.28 already mounted in position; unmounting ours.
[compute09][WARNIN] DEBUG:ceph-disk:Unmounting /var/lib/ceph/tmp/mnt.B1FqSF
[compute09][WARNIN] INFO:ceph-disk:Running command: /bin/umount -- /var/lib/ceph/tmp/mnt.B1FqSF
[compute09][WARNIN] DEBUG:ceph-disk:Starting ceph osd.28...
[compute09][WARNIN] INFO:ceph-disk:Running command: /sbin/initctl emit --no-wait -- ceph-osd cluster=ceph id=28
[compute09][INFO  ] checking OSD status...
[compute09][INFO  ] Running command: ceph --cluster=ceph osd stat --format=json
[compute09][WARNIN] there are 5 OSDs down
[compute09][WARNIN] there are 5 OSDs out
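
Note: ceph-disk reports the osd.28 data dir ready and fires the upstart event, yet the OSD stat check above still counts 5 OSDs down/out. Since this host uses the upstart init type on Ubuntu 14.04, a quick way to confirm the daemon actually came up (assuming the stock ceph upstart instance jobs) is:

    status ceph-osd id=28
    initctl list | grep ceph-osd
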
root@compute09:~# ceph osd tree
ID WEIGHT   TYPE NAME             UP/DOWN REWEIGHT PRIMARY-AFFINITY 
-1 31.34972 root default                                            
-2  1.34995     host controller01                                   
 0  0.26999         osd.0              up  1.00000          1.00000 
 2  0.26999         osd.2              up  1.00000          1.00000 
 1  0.26999         osd.1              up  1.00000          1.00000 
 3  0.26999         osd.3              up  1.00000          1.00000 
 4  0.26999         osd.4              up  1.00000          1.00000 
-3  1.34995     host controller02                                   
 5  0.26999         osd.5            down        0          1.00000 
 6  0.26999         osd.6            down        0          1.00000 
 7  0.26999         osd.7            down        0          1.00000 
 8  0.26999         osd.8            down        0          1.00000 
 9  0.26999         osd.9            down        0          1.00000 
-4  1.34995     host compute01                                      
10  0.26999         osd.10             up  1.00000          1.00000 
11  0.26999         osd.11             up  1.00000          1.00000 
12  0.26999         osd.12             up  1.00000          1.00000 
13  0.26999         osd.13             up  1.00000          1.00000 
14  0.26999         osd.14             up  1.00000          1.00000 
-5  9.09996     host compute02                                      
15  1.81999         osd.15             up  1.00000          1.00000 
16  1.81999         osd.16             up  1.00000          1.00000 
17  1.81999         osd.17             up  1.00000          1.00000 
18  1.81999         osd.18             up  1.00000          1.00000 
19  1.81999         osd.19             up  1.00000          1.00000 
-6  9.09996     host compute03                                      
20  1.81999         osd.20             up  1.00000          1.00000 
21  1.81999         osd.21             up  1.00000          1.00000 
22  1.81999         osd.22             up  1.00000          1.00000 
23  1.81999         osd.23             up  1.00000          1.00000 
24  1.81999         osd.24             up  1.00000          1.00000 
-7  9.09996     host compute09                                      
25  1.81999         osd.25             up  1.00000          1.00000 
26  1.81999         osd.26             up  1.00000          1.00000 
27  1.81999         osd.27             up  1.00000          1.00000 
29  1.81999         osd.29             up  1.00000          1.00000 
30  1.81999         osd.30             up  1.00000          1.00000 
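
Note: osd.28 does not appear anywhere in the tree even though activation reported success, and the five down/out OSDs counted above are osd.5 through osd.9 on controller02. If the osd.28 daemon is running but was never registered in the CRUSH map, it can be added by hand and the tree re-checked. A minimal sketch, assuming a CRUSH weight chosen to match its sibling OSDs on compute09:

    ceph osd crush add osd.28 1.81999 host=compute09
    ceph osd tree
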
root@compute09:~# cd /var/lib/ceph/osd/
root@compute09:/var/lib/ceph/osd# kls
No command 'kls' found, did you mean:
 Command 'fls' from package 'sleuthkit' (universe)
 Command 'jls' from package 'sleuthkit' (universe)
 Command 'kjs' from package 'kdelibs-bin' (main)
 Command 'bls' from package 'bacula-sd' (main)
 Command 'hls' from package 'hfsutils' (main)
 Command 'ls' from package 'coreutils' (main)
 Command 'ols' from package 'speech-tools' (universe)
 Command 'als' from package 'atool' (universe)
 Command 'kas' from package 'openafs-kpasswd' (universe)
 Command 'tls' from package 'python-tlslite' (universe)
 Command 'ils' from package 'sleuthkit' (universe)
kls: command not found
root@compute09:/var/lib/ceph/osd# ls
ceph-25  ceph-26  ceph-27  ceph-28  ceph-29  ceph-30
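
The ceph-28 data directory is present and mounted alongside the other OSDs on this host, so activation succeeded at the disk level; the gap is in the cluster map. Two standard cross-checks (the whoami file in an OSD data dir records the OSD id it was created with):

    cat /var/lib/ceph/osd/ceph-28/whoami
    ceph osd dump | grep osd.28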
