Bug: failed to init masters: generate init config error: exit status 1 #4738

Closed
luanshaotong opened this issue May 14, 2024 · 4 comments · Fixed by #4748
Labels
kind/bug Something isn't working

Comments

luanshaotong (Member) commented May 14, 2024

Sealos Version

v5.0.0-beta5

How to reproduce the bug?

Install k8s-docker on Kylin V10 Desktop (2020).

[screenshots omitted]
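For context, the failing install command was presumably similar to the following, reconstructed from the Clusterfile in the Additional information section (the exact invocation is an assumption, not shown in the issue):

sealos run docker.io/labring/kubernetes-docker:v1.27.13 --masters 192.168.0.17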

What is the expected behavior?

No response

What do you see instead?

No response

Operating environment

root@sealos-arm64:~/sealos-cloud# uname -a  
Linux sealos-arm64 4.4.131-20200710.kylin.desktop-generic #kylin SMP Fri Jul 10 13:46:24 CST 2020 aarch64 aarch64 aarch64 GNU/Linux
CriVersionInfo:
  RuntimeApiVersion: v1
  RuntimeName: docker
  RuntimeVersion: 25.0.5
  Version: 0.1.0
SealosVersion:
  buildDate: "2024-04-12T11:26:53Z"
  compiler: gc
  gitCommit: a0b3363d9
  gitVersion: 5.0.0-beta5
  goVersion: go1.20.14
  platform: linux/arm64

Additional information

/root/.sealos/default/Clusterfile

apiVersion: apps.sealos.io/v1beta1
kind: Cluster
metadata:
  creationTimestamp: "2024-05-14T08:53:37Z"
  name: default
spec:
  hosts:
  - ips:
    - 192.168.0.17:22
    roles:
    - master
    - arm64
  image:
  - docker.io/labring/kubernetes-docker:v1.27.13
  ssh:
    pk: /root/.ssh/id_rsa
    port: 22
status:
  conditions:
  - lastHeartbeatTime: "2024-05-14T08:53:37Z"
    message: 'failed to init masters: generate init config error: exit status 1'
    reason: Apply Cluster
    status: "False"
    type: ApplyClusterError
  mounts:
  - env:
      SEALOS_SYS_CRI_ENDPOINT: /var/run/cri-dockerd.sock
      SEALOS_SYS_IMAGE_ENDPOINT: /var/run/image-cri-shim.sock
      criData: /var/lib/docker
      criDockerdData: /var/lib/cri-dockerd
      defaultVIP: 10.103.97.2
      registryConfig: /etc/registry
      registryData: /var/lib/registry
      registryDomain: sealos.hub
      registryPassword: passw0rd
      registryPort: "5000"
      registryUsername: admin
      sandboxImage: pause:3.9
    imageName: docker.io/labring/kubernetes-docker:v1.27.13
    labels:
      check: check.sh $registryData
      clean: clean.sh && bash clean-cri.sh $criData $criDockerdData
      clean-registry: clean-registry.sh $registryData $registryConfig
      image: ghcr.io/labring/lvscare:v4.3.7
      init: init-cri.sh && bash init.sh
      init-registry: init-registry.sh $registryData $registryConfig
      io.buildah.version: 1.30.0
      org.opencontainers.image.description: kubernetes-v1.27.13 container image
      org.opencontainers.image.licenses: MIT
      org.opencontainers.image.source: https://github.com/labring-actions/cache
      sealos.io.type: rootfs
      sealos.io.version: v1beta1
      version: v1.27.13
      vip: $defaultVIP
    mountPoint: /var/lib/containers/storage/overlay/1ba4f3d0f9c092addc65764a5614d0ce3cfdff05fef7db4d4f0880d6c7b50d22/merged
    name: default-adgrucyo
    type: rootfs
  phase: ClusterFailed

---
apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.0.17
  bindPort: 6443
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  taints: []

---
apiServer:
  certSANs:
  - 127.0.0.1
  - apiserver.cluster.local
  - 10.103.97.2
  - 192.168.0.17
  extraArgs:
    audit-log-format: json
    audit-log-maxage: "7"
    audit-log-maxbackup: "10"
    audit-log-maxsize: "100"
    audit-log-path: /var/log/kubernetes/audit.log
    audit-policy-file: /etc/kubernetes/audit-policy.yml
    enable-aggregator-routing: "true"
    feature-gates: ""
  extraVolumes:
  - hostPath: /etc/kubernetes
    mountPath: /etc/kubernetes
    name: audit
    pathType: DirectoryOrCreate
  - hostPath: /var/log/kubernetes
    mountPath: /var/log/kubernetes
    name: audit-log
    pathType: DirectoryOrCreate
  - hostPath: /etc/localtime
    mountPath: /etc/localtime
    name: localtime
    pathType: File
    readOnly: true
apiVersion: kubeadm.k8s.io/v1beta3
controlPlaneEndpoint: apiserver.cluster.local:6443
controllerManager:
  extraArgs:
    bind-address: 0.0.0.0
    cluster-signing-duration: 876000h
    feature-gates: ""
  extraVolumes:
  - hostPath: /etc/localtime
    mountPath: /etc/localtime
    name: localtime
    pathType: File
    readOnly: true
dns: {}
etcd:
  local:
    dataDir: ""
    extraArgs:
      listen-metrics-urls: http://0.0.0.0:2381
kind: ClusterConfiguration
kubernetesVersion: v1.27.13
networking:
  podSubnet: 100.64.0.0/10
  serviceSubnet: 10.96.0.0/22
scheduler:
  extraArgs:
    bind-address: 0.0.0.0
    feature-gates: ""
  extraVolumes:
  - hostPath: /etc/localtime
    mountPath: /etc/localtime
    name: localtime
    pathType: File
    readOnly: true

---
apiVersion: kubeadm.k8s.io/v1beta3
caCertPath: /etc/kubernetes/pki/ca.crt
controlPlane:
  localAPIEndpoint:
    bindPort: 6443
discovery:
  timeout: 5m0s
kind: JoinConfiguration
nodeRegistration:
  criSocket: /run/containerd/containerd.sock
  taints: null

---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: 0.0.0.0
bindAddressHardFail: false
clientConnection:
  acceptContentTypes: ""
  burst: 10
  contentType: application/vnd.kubernetes.protobuf
  kubeconfig: ""
  qps: 5
clusterCIDR: ""
configSyncPeriod: 15m0s
conntrack:
  maxPerCore: 32768
  min: 131072
  tcpCloseWaitTimeout: 1h0m0s
  tcpEstablishedTimeout: 24h0m0s
detectLocal:
  bridgeInterface: ""
  interfaceNamePrefix: ""
detectLocalMode: ""
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: ""
iptables:
  localhostNodePorts: true
  masqueradeAll: false
  masqueradeBit: 14
  minSyncPeriod: 1s
  syncPeriod: 30s
ipvs:
  excludeCIDRs:
  - 10.103.97.2/32
  minSyncPeriod: 0s
  scheduler: ""
  strictARP: false
  syncPeriod: 30s
  tcpFinTimeout: 0s
  tcpTimeout: 0s
  udpTimeout: 0s
kind: KubeProxyConfiguration
metricsBindAddress: 0.0.0.0:10249
mode: ipvs
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
showHiddenMetricsForVersion: ""
winkernel:
  enableDSR: false
  forwardHealthCheckVip: false
  networkName: ""
  rootHnsEndpointName: ""
  sourceVip: ""

---
address: 0.0.0.0
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 2m0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 5m0s
    cacheUnauthorizedTTL: 30s
cgroupDriver: cgroupfs
cgroupsPerQOS: true
configMapAndSecretChangeDetectionStrategy: Watch
containerLogMaxFiles: 5
containerLogMaxSize: 10Mi
containerRuntimeEndpoint: unix:///run/containerd/containerd.sock
contentType: application/vnd.kubernetes.protobuf
cpuCFSQuota: true
cpuCFSQuotaPeriod: 100ms
cpuManagerPolicy: none
cpuManagerReconcilePeriod: 10s
enableControllerAttachDetach: true
enableDebugFlagsHandler: true
enableDebuggingHandlers: true
enableProfilingHandler: true
enableServer: true
enableSystemLogHandler: true
enforceNodeAllocatable:
- pods
eventBurst: 10
eventRecordQPS: 5
evictionHard:
  imagefs.available: 10%
  memory.available: 100Mi
  nodefs.available: 10%
  nodefs.inodesFree: 5%
evictionPressureTransitionPeriod: 5m0s
failSwapOn: true
fileCheckFrequency: 20s
hairpinMode: promiscuous-bridge
healthzBindAddress: 0.0.0.0
healthzPort: 10248
httpCheckFrequency: 20s
imageGCHighThresholdPercent: 85
imageGCLowThresholdPercent: 75
imageMinimumGCAge: 2m0s
iptablesDropBit: 15
iptablesMasqueradeBit: 14
kind: KubeletConfiguration
kubeAPIBurst: 10
kubeAPIQPS: 5
localStorageCapacityIsolation: true
logging:
  flushFrequency: 5000000000
  format: text
  options:
    json:
      infoBufferSize: "0"
  verbosity: 0
makeIPTablesUtilChains: true
maxOpenFiles: 1000000
maxPods: 110
memoryManagerPolicy: None
memorySwap: {}
memoryThrottlingFactor: 0.9
nodeLeaseDurationSeconds: 40
nodeStatusMaxImages: 50
nodeStatusReportFrequency: 10s
nodeStatusUpdateFrequency: 10s
oomScoreAdj: -999
podPidsLimit: -1
port: 10250
registerNode: true
registryBurst: 10
registryPullQPS: 5
rotateCertificates: true
runtimeRequestTimeout: 2m0s
seccompDefault: false
serializeImagePulls: true
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 4h0m0s
syncFrequency: 1m0s
topologyManagerPolicy: none
topologyManagerScope: container
volumePluginDir: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/
volumeStatsAggPeriod: 1m0s
luanshaotong added the kind/bug label on May 14, 2024
luanshaotong (Member Author) commented:

[screenshot omitted]

The same error occurs with the --debug parameter. How can this be fixed?

luanshaotong (Member Author) commented:

Found a way to solve it:

sealos reset
apt remove docker.io containerd -y

Then reinstall.
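A consolidated sketch of the workaround, assuming the single-master setup from the Clusterfile above (the reinstall command is an assumption, not taken verbatim from the issue):

# tear down the failed cluster state
sealos reset
# remove the pre-installed runtime that conflicts with the one sealos ships
apt remove docker.io containerd -y
# reinstall from the same cluster image
sealos run docker.io/labring/kubernetes-docker:v1.27.13 --masters 192.168.0.17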

luanshaotong (Member Author) commented May 14, 2024

Does Sealos need to check whether containerd is already installed?

fanux (Member) commented May 15, 2024

> Does Sealos need to check whether containerd is already installed?

That would be better.
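A hypothetical pre-flight check along these lines (a sketch only; not the actual implementation merged in #4748):

# abort early if a conflicting container runtime is already present on the host
if command -v containerd >/dev/null 2>&1 || systemctl is-active --quiet containerd; then
  echo "error: containerd is already installed on this host; run 'sealos reset' and remove it before installing" >&2
  exit 1
fi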
