rook: Doesn't work on minikube

env:

# lsb_release -a
No LSB modules are available.
Distributor ID:	Ubuntu
Description:	Ubuntu 16.04.2 LTS
Release:	16.04
Codename:	xenial
# kubectl version
Client Version: version.Info{Major:"1", Minor:"8", GitVersion:"v1.8.1", GitCommit:"f38e43b221d08850172a9a4ea785a86a3ffa3b3a", GitTreeState:"clean", BuildDate:"2017-10-11T23:27:35Z", GoVersion:"go1.8.3", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"8", GitVersion:"v1.8.0", GitCommit:"0b9efaeb34a2fc51ff8e4d34ad9bc6375459c4a4", GitTreeState:"dirty", BuildDate:"2017-10-17T15:09:55Z", GoVersion:"go1.8.3", Compiler:"gc", Platform:"linux/amd64"}
# minikube version
minikube version: v0.23.0

issue: rook-ceph-mon0-hprzn ends up in 0/1 CrashLoopBackOff. Steps to reproduce:

# minikube delete
all ok.

# minikube start
all ok.

# kubectl create -f rook-operator.yaml 
namespace "rook-system" created
clusterrole "rook-operator" created
serviceaccount "rook-operator" created
clusterrolebinding "rook-operator" created
deployment "rook-operator" created

# kubectl get po --all-namespaces
NAMESPACE     NAME                             READY     STATUS    RESTARTS   AGE
kube-system   kube-addon-manager-minikube      1/1       Running   4          13h
kube-system   kube-dns-6fc954457d-wk4rk        3/3       Running   12         13h
kube-system   kubernetes-dashboard-45m7g       1/1       Running   6          13h
rook-system   rook-operator-599d4985f9-w278s   1/1       Running   0          2s

# kubectl create -f rook-cluster.yaml 
namespace "rook" created
cluster "rook" created

# kubectl get po --all-namespaces
NAMESPACE     NAME                             READY     STATUS    RESTARTS   AGE
kube-system   kube-addon-manager-minikube      1/1       Running   4          13h
kube-system   kube-dns-6fc954457d-wk4rk        3/3       Running   12         13h
kube-system   kubernetes-dashboard-45m7g       1/1       Running   6          13h
rook-system   rook-agent-s7pw2                 1/1       Running   0          20s
rook-system   rook-operator-599d4985f9-w278s   1/1       Running   0          24s
rook          rook-ceph-mon0-hprzn             0/1       Error     1          3s

# kubectl describe -n=rook po rook-ceph-mon0-hprzn
Name:           rook-ceph-mon0-hprzn
Namespace:      rook
Node:           minikube/192.168.99.100
Start Time:     Sun, 05 Nov 2017 07:24:55 +0800
Labels:         app=rook-ceph-mon
                mon=rook-ceph-mon0
                mon_cluster=rook
Annotations:    kubernetes.io/created-by={"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"ReplicaSet","namespace":"rook","name":"rook-ceph-mon0","uid":"5db7f151-c1b7-11e7-bb9a-080027e1b4f3","apiVe...
                rook_version=master
Status:         Running
IP:             172.17.0.5
Created By:     ReplicaSet/rook-ceph-mon0
Controlled By:  ReplicaSet/rook-ceph-mon0
Containers:
  rook-ceph-mon:
    Container ID:  docker://6e3842d68de6ae1f3b0e741148f10b7d149f259cf58bcc6be7f50cf8f46d5f34
    Image:         rook/rook:master
    Image ID:      docker-pullable://rook/rook@sha256:cd6a10c750845d39a5c717675cfede6db57919bde106bc2e7f6ab6d8b126ba48
    Port:          6790/TCP
    Args:
      mon
      --config-dir=/var/lib/rook
      --name=rook-ceph-mon0
      --port=6790
      --fsid=5d7bc11e-9bd4-42c3-8202-a7bb1cb5be06
    State:          Waiting
      Reason:       CrashLoopBackOff
    Last State:     Terminated
      Reason:       Error
      Message:      failed to run mon. failed to start mon: Failed to complete rook-ceph-mon0: exit status 28
      Exit Code:    1
      Started:      Sun, 05 Nov 2017 07:25:11 +0800
      Finished:     Sun, 05 Nov 2017 07:25:12 +0800
    Ready:          False
    Restart Count:  2
    Environment:
      ROOK_PRIVATE_IPV4:           (v1:status.podIP)
      ROOK_PUBLIC_IPV4:           10.0.0.112
      ROOK_CLUSTER_NAME:          rook
      ROOK_MON_ENDPOINTS:         <set to the key 'data' of config map 'rook-ceph-mon-endpoints'>  Optional: false
      ROOK_MON_SECRET:            <set to the key 'mon-secret' in secret 'rook-ceph-mon'>          Optional: false
      ROOK_ADMIN_SECRET:          <set to the key 'admin-secret' in secret 'rook-ceph-mon'>        Optional: false
      ROOK_CEPH_CONFIG_OVERRIDE:  /etc/rook/config/override.conf
    Mounts:
      /etc/rook/config from rook-config-override (rw)
      /var/lib/rook from rook-data (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from default-token-fv4tv (ro)
Conditions:
  Type           Status
  Initialized    True 
  Ready          False 
  PodScheduled   True 
Volumes:
  rook-data:
    Type:  HostPath (bare host directory volume)
    Path:  /var/lib/rook
  rook-config-override:
    Type:      ConfigMap (a volume populated by a ConfigMap)
    Name:      rook-config-override
    Optional:  false
  default-token-fv4tv:
    Type:        Secret (a volume populated by a Secret)
    SecretName:  default-token-fv4tv
    Optional:    false
QoS Class:       BestEffort
Node-Selectors:  kubernetes.io/hostname=minikube
Tolerations:     <none>
Events:
  Type     Reason                 Age                From               Message
  ----     ------                 ----               ----               -------
  Normal   Scheduled              32s                default-scheduler  Successfully assigned rook-ceph-mon0-hprzn to minikube
  Normal   SuccessfulMountVolume  32s                kubelet, minikube  MountVolume.SetUp succeeded for volume "rook-data"
  Normal   SuccessfulMountVolume  32s                kubelet, minikube  MountVolume.SetUp succeeded for volume "rook-config-override"
  Normal   SuccessfulMountVolume  32s                kubelet, minikube  MountVolume.SetUp succeeded for volume "default-token-fv4tv"
  Normal   Pulled                 16s (x3 over 31s)  kubelet, minikube  Container image "rook/rook:master" already present on machine
  Normal   Created                16s (x3 over 31s)  kubelet, minikube  Created container
  Normal   Started                16s (x3 over 31s)  kubelet, minikube  Started container
  Warning  BackOff                1s (x4 over 29s)   kubelet, minikube  Back-off restarting failed container
  Warning  FailedSync             1s (x4 over 29s)   kubelet, minikube  Error syncing pod
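
The original report doesn't include the container logs, but the output of the crashing mon can be pulled with kubectl; the --previous flag returns the output of the last terminated attempt:

# kubectl -n rook logs rook-ceph-mon0-hprzn --previous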

Most upvoted comments

After changing dataDirHostPath in rook-cluster.yaml from /var/lib/rook to /data/rook, it works fine. See https://github.com/kubernetes/minikube/blob/master/docs/persistent_volumes.md
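
Concretely, the only change needed in the Cluster spec is the data directory, pointing it at a path the minikube VM actually persists (/data is one of the directories listed in the minikube doc linked above). A minimal sketch of the changed field:

spec:
  dataDirHostPath: /data/rook    # was /var/lib/rook; /data is persisted by the minikube VM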

# kubectl get po --all-namespaces
NAMESPACE     NAME                             READY     STATUS    RESTARTS   AGE
kube-system   kube-addon-manager-minikube      1/1       Running   0          2h
kube-system   kube-dns-6fc954457d-6jtq6        3/3       Running   0          2h
kube-system   kubernetes-dashboard-qfdbp       1/1       Running   2          2h
rook-system   rook-agent-z59h9                 1/1       Running   0          5m
rook-system   rook-operator-599d4985f9-7vhcj   1/1       Running   0          6m
rook          rook-api-5f95f65577-sc2ds        1/1       Running   0          4m
rook          rook-ceph-mgr0-cbb9df9d4-5hblj   1/1       Running   0          4m
rook          rook-ceph-mgr1-cf647cfcd-fbn5f   1/1       Running   0          4m
rook          rook-ceph-mon0-dwcgw             1/1       Running   0          5m
rook          rook-ceph-mon1-xv44c             1/1       Running   0          4m
rook          rook-ceph-mon2-wf7w2             1/1       Running   0          4m
rook          rook-ceph-osd-xzkqr              1/1       Running   0          4m

Closing plz.