How to install Docker inside a Docker container (Docker in Docker)

Add the following line to your Dockerfile:

RUN curl -fsSL get.docker.com | bash
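
For reference, a minimal sketch of how the resulting image might be built and exercised; the image name is an assumption, the base image is assumed to have a standard shell entrypoint, and the host's Docker socket is mounted so the docker CLI inside the container talks to the host daemon:

# Build an image whose Dockerfile contains the RUN line above, then verify the
# docker CLI inside it by mounting the host's Docker socket.
docker build -t my-dind-image .
docker run --rm -v /var/run/docker.sock:/var/run/docker.sock my-dind-image docker version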

Contents of the script:

#!/bin/sh
set -e

# This script is meant for quick & easy install via:
#   $ curl -fsSL https://get.docker.com -o get-docker.sh
#   $ sh get-docker.sh
#
# For test builds (ie. release candidates):
#   $ curl -fsSL https://test.docker.com -o test-docker.sh
#   $ sh test-docker.sh
#
# NOTE: Make sure to verify the contents of the script
#       you downloaded matches the contents of install.sh
#       located at https://github.com/docker/docker-install
#       before executing.
#
# Git commit from https://github.com/docker/docker-install when
# the script was uploaded (Should only be modified by upload job):
SCRIPT_COMMIT_SHA=cfba462


# The channel to install from:
#   * nightly
#   * test
#   * stable
#   * edge (deprecated)
DEFAULT_CHANNEL_VALUE="stable"
if [ -z "$CHANNEL" ]; then
	CHANNEL=$DEFAULT_CHANNEL_VALUE
fi

DEFAULT_DOWNLOAD_URL="https://download.docker.com"
if [ -z "$DOWNLOAD_URL" ]; then
	DOWNLOAD_URL=$DEFAULT_DOWNLOAD_URL
fi

DEFAULT_REPO_FILE="docker-ce.repo"
if [ -z "$REPO_FILE" ]; then
	REPO_FILE="$DEFAULT_REPO_FILE"
fi

mirror=''
DRY_RUN=${DRY_RUN:-}
while [ $# -gt 0 ]; do
	case "$1" in
		--mirror)
			mirror="$2"
			shift
			;;
		--dry-run)
			DRY_RUN=1
			;;
		--*)
			echo "Illegal option $1"
			;;
	esac
	shift $(( $# > 0 ? 1 : 0 ))
done

case "$mirror" in
	Aliyun)
		DOWNLOAD_URL="https://mirrors.aliyun.com/docker-ce"
		;;
	AzureChinaCloud)
		DOWNLOAD_URL="https://mirror.azure.cn/docker-ce"
		;;
esac

command_exists() {
	command -v "$@" > /dev/null 2>&1
}

is_dry_run() {
	if [ -z "$DRY_RUN" ]; then
		return 1
	else
		return 0
	fi
}

deprecation_notice() {
	distro=$1
	date=$2
	echo
	echo "DEPRECATION WARNING:"
	echo "    The distribution, $distro, will no longer be supported in this script as of $date."
	echo "    If you feel this is a mistake please submit an issue at https://github.com/docker/docker-install/issues/new"
	echo
	sleep 10
}

get_distribution() {
	lsb_dist=""
	# Every system that we officially support has /etc/os-release
	if [ -r /etc/os-release ]; then
		lsb_dist="$(. /etc/os-release && echo "$ID")"
	fi
	# Returning an empty string here should be alright since the
	# case statements don't act unless you provide an actual value
	echo "$lsb_dist"
}

add_debian_backport_repo() {
	debian_version="$1"
	backports="deb http://ftp.debian.org/debian $debian_version-backports main"
	if ! grep -Fxq "$backports" /etc/apt/sources.list; then
		(set -x; $sh_c "echo \"$backports\" >> /etc/apt/sources.list")
	fi
}

echo_docker_as_nonroot() {
	if is_dry_run; then
		return
	fi
	if command_exists docker && [ -e /var/run/docker.sock ]; then
		(
			set -x
			$sh_c 'docker version'
		) || true
	fi
	your_user=your-user
	[ "$user" != 'root' ] && your_user="$user"
	# intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output
	echo "If you would like to use Docker as a non-root user, you should now consider"
	echo "adding your user to the \"docker\" group with something like:"
	echo
	echo "  sudo usermod -aG docker $your_user"
	echo
	echo "Remember that you will have to log out and back in for this to take effect!"
	echo
	echo "WARNING: Adding a user to the \"docker\" group will grant the ability to run"
	echo "         containers which can be used to obtain root privileges on the"
	echo "         docker host."
	echo "         Refer to https://docs.docker.com/engine/security/security/#docker-daemon-attack-surface"
	echo "         for more information."

}

# Check if this is a forked Linux distro
check_forked() {

	# Check for lsb_release command existence, it usually exists in forked distros
	if command_exists lsb_release; then
		# Check if the `-u` option is supported
		set +e
		lsb_release -a -u > /dev/null 2>&1
		lsb_release_exit_code=$?
		set -e

		# Check if the command has exited successfully, it means we're in a forked distro
		if [ "$lsb_release_exit_code" = "0" ]; then
			# Print info about current distro
			cat <<-EOF
			You're using '$lsb_dist' version '$dist_version'.
			EOF

			# Get the upstream release info
			lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[:space:]')
			dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[:space:]')

			# Print info about upstream distro
			cat <<-EOF
			Upstream release is '$lsb_dist' version '$dist_version'.
			EOF
		else
			if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then
				if [ "$lsb_dist" = "osmc" ]; then
					# OSMC runs Raspbian
					lsb_dist=raspbian
				else
					# We're Debian and don't even know it!
					lsb_dist=debian
				fi
				dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')"
				case "$dist_version" in
					9)
						dist_version="stretch"
					;;
					8|'Kali Linux 2')
						dist_version="jessie"
					;;
				esac
			fi
		fi
	fi
}

semverParse() {
	major="${1%%.*}"
	minor="${1#$major.}"
	minor="${minor%%.*}"
	patch="${1#$major.$minor.}"
	patch="${patch%%[-.]*}"
}

ee_notice() {
	echo
	echo
	echo "  WARNING: $1 is now only supported by Docker EE"
	echo "           Check https://store.docker.com for information on Docker EE"
	echo
	echo
}

ee_promo() {
	# Randomly select an advertisement
	echo
	echo "** DOCKER ENGINE - ENTERPRISE **"
	echo
	SHORT_URL=""
	case $(shuf -i 0-1 -n 1) in
		0)
			echo "Test drive additional security features by activating Docker Engine - Enterprise."
			echo
			echo "  * Leverage FIPS 140-2 validated encryption"
			echo "  * Run only trusted images with digital signature enforcement"
			SHORT_URL="https://dockr.ly/engine1"
			;;
		1)
			echo "If you鈥檙e ready for production workloads, Docker Engine - Enterprise also includes:"
			echo
			echo "  * SLA-backed technical support"
			echo "  * Extended lifecycle maintenance policy for patches and hotfixes"
			echo "  * Access to certified ecosystem content"
			SHORT_URL="https://dockr.ly/engine2"
			;;
	esac
	echo
	echo "** Learn more at ${SHORT_URL} **"
	echo
	echo "ACTIVATE your own engine to Docker Engine - Enterprise using:"
	echo
	echo "  sudo docker engine activate"
	echo
	sleep 10
}

do_install() {
	echo "# Executing docker install script, commit: $SCRIPT_COMMIT_SHA"

	if command_exists docker; then
		docker_version="$(docker -v | cut -d ' ' -f3 | cut -d ',' -f1)"
		MAJOR_W=1
		MINOR_W=10

		semverParse "$docker_version"

		shouldWarn=0
		if [ "$major" -lt "$MAJOR_W" ]; then
			shouldWarn=1
		fi

		if [ "$major" -le "$MAJOR_W" ] && [ "$minor" -lt "$MINOR_W" ]; then
			shouldWarn=1
		fi

		cat >&2 <<-'EOF'
			Warning: the "docker" command appears to already exist on this system.

			If you already have Docker installed, this script can cause trouble, which is
			why we're displaying this warning and provide the opportunity to cancel the
			installation.

			If you installed the current Docker package using this script and are using it
		EOF

		if [ $shouldWarn -eq 1 ]; then
			cat >&2 <<-'EOF'
			again to update Docker, we urge you to migrate your image store before upgrading
			to v1.10+.

			You can find instructions for this here:
			https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration
			EOF
		else
			cat >&2 <<-'EOF'
			again to update Docker, you can safely ignore this message.
			EOF
		fi

		cat >&2 <<-'EOF'

			You may press Ctrl+C now to abort this script.
		EOF
		( set -x; sleep 20 )
	fi

	user="$(id -un 2>/dev/null || true)"

	sh_c='sh -c'
	if [ "$user" != 'root' ]; then
		if command_exists sudo; then
			sh_c='sudo -E sh -c'
		elif command_exists su; then
			sh_c='su -c'
		else
			cat >&2 <<-'EOF'
			Error: this installer needs the ability to run commands as root.
			We are unable to find either "sudo" or "su" available to make this happen.
			EOF
			exit 1
		fi
	fi

	if is_dry_run; then
		sh_c="echo"
	fi

	# perform some very rudimentary platform detection
	lsb_dist=$( get_distribution )
	lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"

	case "$lsb_dist" in

		ubuntu)
			if command_exists lsb_release; then
				dist_version="$(lsb_release --codename | cut -f2)"
			fi
			if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then
				dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")"
			fi
		;;

		debian|raspbian)
			dist_version="$(sed 's/\/.*//' /etc/debian_version | sed 's/\..*//')"
			case "$dist_version" in
				9)
					dist_version="stretch"
				;;
				8)
					dist_version="jessie"
				;;
			esac
		;;

		centos)
			if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
				dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
			fi
		;;

		rhel|ol|sles)
			ee_notice "$lsb_dist"
			exit 1
			;;

		*)
			if command_exists lsb_release; then
				dist_version="$(lsb_release --release | cut -f2)"
			fi
			if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
				dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
			fi
		;;

	esac

	# Check if this is a forked Linux distro
	check_forked

	# Run setup for each distro accordingly
	case "$lsb_dist" in
		ubuntu|debian|raspbian)
			pre_reqs="apt-transport-https ca-certificates curl"
			if [ "$lsb_dist" = "debian" ]; then
				# libseccomp2 does not exist for debian jessie main repos for aarch64
				if [ "$(uname -m)" = "aarch64" ] && [ "$dist_version" = "jessie" ]; then
					add_debian_backport_repo "$dist_version"
				fi
			fi

			if ! command -v gpg > /dev/null; then
				pre_reqs="$pre_reqs gnupg"
			fi
			apt_repo="deb [arch=$(dpkg --print-architecture)] $DOWNLOAD_URL/linux/$lsb_dist $dist_version $CHANNEL"
			(
				if ! is_dry_run; then
					set -x
				fi
				$sh_c 'apt-get update -qq >/dev/null'
				$sh_c "apt-get install -y -qq $pre_reqs >/dev/null"
				$sh_c "curl -fsSL \"$DOWNLOAD_URL/linux/$lsb_dist/gpg\" | apt-key add -qq - >/dev/null"
				$sh_c "echo \"$apt_repo\" > /etc/apt/sources.list.d/docker.list"
				$sh_c 'apt-get update -qq >/dev/null'
			)
			pkg_version=""
			if [ -n "$VERSION" ]; then
				if is_dry_run; then
					echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
				else
					# Will work for incomplete versions IE (17.12), but may not actually grab the "latest" if in the test channel
					pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/~ce~.*/g" | sed "s/-/.*/g").*-0~$lsb_dist"
					search_command="apt-cache madison 'docker-ce' | grep '$pkg_pattern' | head -1 | cut -d' ' -f 4"
					pkg_version="$($sh_c "$search_command")"
					echo "INFO: Searching repository for VERSION '$VERSION'"
					echo "INFO: $search_command"
					if [ -z "$pkg_version" ]; then
						echo
						echo "ERROR: '$VERSION' not found amongst apt-cache madison results"
						echo
						exit 1
					fi
					pkg_version="=$pkg_version"
				fi
			fi
			(
				if ! is_dry_run; then
					set -x
				fi
				$sh_c "apt-get install -y -qq --no-install-recommends docker-ce$pkg_version >/dev/null"
			)
			echo_docker_as_nonroot
			ee_promo
			exit 0
			;;
		centos|fedora)
			yum_repo="$DOWNLOAD_URL/linux/$lsb_dist/$REPO_FILE"
			if ! curl -Ifs "$yum_repo" > /dev/null; then
				echo "Error: Unable to curl repository file $yum_repo, is it valid?"
				exit 1
			fi
			if [ "$lsb_dist" = "fedora" ]; then
				pkg_manager="dnf"
				config_manager="dnf config-manager"
				enable_channel_flag="--set-enabled"
				disable_channel_flag="--set-disabled"
				pre_reqs="dnf-plugins-core"
				pkg_suffix="fc$dist_version"
			else
				pkg_manager="yum"
				config_manager="yum-config-manager"
				enable_channel_flag="--enable"
				disable_channel_flag="--disable"
				pre_reqs="yum-utils"
				pkg_suffix="el"
			fi
			(
				if ! is_dry_run; then
					set -x
				fi
				$sh_c "$pkg_manager install -y -q $pre_reqs"
				$sh_c "$config_manager --add-repo $yum_repo"

				if [ "$CHANNEL" != "stable" ]; then
					$sh_c "$config_manager $disable_channel_flag docker-ce-*"
					$sh_c "$config_manager $enable_channel_flag docker-ce-$CHANNEL"
				fi
				$sh_c "$pkg_manager makecache"
			)
			pkg_version=""
			if [ -n "$VERSION" ]; then
				if is_dry_run; then
					echo "# WARNING: VERSION pinning is not supported in DRY_RUN"
				else
					pkg_pattern="$(echo "$VERSION" | sed "s/-ce-/\\\\.ce.*/g" | sed "s/-/.*/g").*$pkg_suffix"
					search_command="$pkg_manager list --showduplicates 'docker-ce' | grep '$pkg_pattern' | tail -1 | awk '{print \$2}'"
					pkg_version="$($sh_c "$search_command")"
					echo "INFO: Searching repository for VERSION '$VERSION'"
					echo "INFO: $search_command"
					if [ -z "$pkg_version" ]; then
						echo
						echo "ERROR: '$VERSION' not found amongst $pkg_manager list results"
						echo
						exit 1
					fi
					# Cut out the epoch and prefix with a '-'
					pkg_version="-$(echo "$pkg_version" | cut -d':' -f 2)"
				fi
			fi
			(
				if ! is_dry_run; then
					set -x
				fi
				$sh_c "$pkg_manager install -y -q docker-ce$pkg_version"
			)
			echo_docker_as_nonroot
			ee_promo
			exit 0
			;;
	esac
	exit 1
}

# wrapped up in a function so that we have some protection against only getting
# half the file during "curl | sh"
do_install

Building an enterprise-grade auto-scaling Jenkins slave cluster on Kubernetes

Background

Beike has grown rapidly since its founding on the back of Lianjia's substantial capital, and the number of projects across its business units keeps expanding, with development languages covering Java, PHP, Node.js, Golang, C and more. The company adopted Jenkins early on for build, deployment and other automation work, but as projects multiplied, the bottleneck of Jenkins being inherently a single point became more and more pronounced. Most business teams would scrape together a spare machine as a build host, set up their build environment on it, and then ask the Jenkins administrator to add it to the Jenkins cluster as a slave node; the jenkins-slave fleet has since grown to more than sixty physical machines. Yet the Jenkins web UI clearly shows the queue holding a backlog of a dozen or so jobs. Why can't sixty-plus slave nodes satisfy the current build demand? The root cause is that each team keeps its own dedicated build environment and these environments are not uniform. This leaves nodes unevenly idle and resource utilization extremely low, and once a slave node is damaged it has to be repaired or even rebuilt by hand, which is a real nuisance. Our approach is to deploy Jenkins on a Kubernetes cluster and use the Kubernetes plugin to scale jenkins-slave nodes on demand: slave nodes are added automatically when jobs arrive and destroyed automatically when the jobs finish. To Jenkins, slave nodes (containers) are ephemeral and are torn down as soon as a job ends.

How it works

Resource creation: Jenkins automatically creates a temporary Docker container based on the job's attributes and adds it to the Jenkins cluster as a slave node, so capacity expands automatically;
Load balancing: slave nodes are created through Kubernetes scheduling, which takes resource load into account and places them on suitable nodes;
Resource release: when a job finishes, Jenkins automatically removes the node and destroys the container, releasing the resources;
Build environments: build environments are configured through the plugin; jobs the plugin cannot cover can specify a dedicated pod template, so different project types use different build containers;
Resource expansion: thanks to Kubernetes, when build pressure outgrows the available resources, simply adding nodes to the Kubernetes cluster expands capacity;

Deployment

Architecture diagram:

Set up a Kubernetes cluster:

We use a dedicated Kubernetes cluster to run the Jenkins service. If you do not have a Kubernetes cluster, you need to create one first; see "Installing a single-master Kubernetes 1.13 cluster with kubeadm".
Jenkins is exposed through an Ingress, so an Ingress Controller has to be installed in the Kubernetes cluster; see "Installing and using Ingress in a Kubernetes cluster".

Deploy Jenkins to the Kubernetes cluster:

Create the PV and PVC for the Jenkins master

Note: the storage here uses NFS on 10.26.10.47; setting up NFS is not covered in detail. You could switch this to a local hostPath instead, but it is better to back the slave volume with shared storage so it works across nodes.
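
A rough sketch of the NFS side on 10.26.10.47, assuming nfs-utils is installed (the exported paths match the manifests below; the export options are an assumption):

# Create the directories used by the PVs and export them read-write.
mkdir -p /data0/docker/jenkins-master /data0/docker/jenkins-slave
echo "/data0/docker *(rw,sync,no_root_squash)" >> /etc/exports
exportfs -ra
systemctl enable --now nfs-server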

[root@k8s01-test jenkins]# cat jenkins-master-pv.yaml
kind: PersistentVolume
apiVersion: v1
metadata:
  name: jenkins-master-pv
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteMany
  nfs:
    path: /data0/docker/jenkins-master
    server: 10.26.10.47
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: jenkins-master-pvc
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 20Gi
Create the PV and PVC for the Jenkins slaves
[root@k8s01-test jenkins]# cat jenkins-slave-pv.yaml
kind: PersistentVolume
apiVersion: v1
metadata:
  name: jenkins-slave-pv
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 50Gi
  accessModes:
    - ReadWriteMany
  nfs:
    path: /data0/docker/jenkins-slave
    server: 10.26.10.47
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: jenkins-slave-pvc
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 50Gi
Create the service account and RBAC rules
[root@k8s01-test jenkins]# cat jenkins-account.yaml
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: jenkins

---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: jenkins
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["create","delete","get","list","patch","update","watch"]
- apiGroups: [""]
  resources: ["pods/exec"]
  verbs: ["create","delete","get","list","patch","update","watch"]
- apiGroups: [""]
  resources: ["pods/log"]
  verbs: ["get","list","watch"]
- apiGroups: [""]
  resources: ["secrets"]
  verbs: ["get"]

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: jenkins
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: jenkins
subjects:
- kind: ServiceAccount
  name: jenkins
Create the Jenkins master
[root@k8s01-test jenkins]# cat jenkins.yaml
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: jenkins
  labels:
    name: jenkins
spec:
  serviceName: jenkins
  replicas: 1
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      name: jenkins
      labels:
        name: jenkins
    spec:
      terminationGracePeriodSeconds: 10
      serviceAccountName: jenkins
      containers:
        - name: jenkins
          image: jenkins/jenkins:lts-alpine
          imagePullPolicy: Always
          ports:
            - containerPort: 8080
            - containerPort: 50000
          resources:
            limits:
              cpu: 1
              memory: 1Gi
            requests:
              cpu: 0.5
              memory: 500Mi
          env:
            - name: LIMITS_MEMORY
              valueFrom:
                resourceFieldRef:
                  resource: limits.memory
                  divisor: 1Mi
            - name: JAVA_OPTS
              value: -Xmx$(LIMITS_MEMORY)m -XshowSettings:vm -Dhudson.slaves.NodeProvisioner.initialDelay=0 -Dhudson.slaves.NodeProvisioner.MARGIN=50 -Dhudson.slaves.NodeProvisioner.MARGIN0=0.85
          volumeMounts:
            - name: jenkins-home
              mountPath: /var/jenkins_home
          livenessProbe:
            httpGet:
              path: /login
              port: 8080
            initialDelaySeconds: 60
            timeoutSeconds: 5
            failureThreshold: 12 # ~2 minutes
          readinessProbe:
            httpGet:
              path: /login
              port: 8080
            initialDelaySeconds: 60
            timeoutSeconds: 5
            failureThreshold: 12 # ~2 minutes
      securityContext:
        fsGroup: 1000
      volumes:
        - name: jenkins-home
          persistentVolumeClaim:
            claimName: jenkins-master-pvc
---
apiVersion: v1
kind: Service
metadata:
  name: jenkins
spec:
  selector:
    name: jenkins

  ports:
    -
      name: http
      port: 80
      targetPort: 8080
      protocol: TCP
    -
      name: agent
      port: 50000
      protocol: TCP

---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: jenkins
  annotations:
    nginx.ingress.kubernetes.io/ssl-redirect: "false"
    kubernetes.io/tls-acme: "false"
    nginx.ingress.kubernetes.io/proxy-body-size: 50m
    nginx.ingress.kubernetes.io/proxy-request-buffering: "off"
    ingress.kubernetes.io/ssl-redirect: "false"
    ingress.kubernetes.io/proxy-body-size: 50m
    ingress.kubernetes.io/proxy-request-buffering: "off"
spec:
  rules:
  - http:
      paths:
      - path: /
        backend:
          serviceName: jenkins
          servicePort: 80
    host: jenkins.intra.ke.com
 # - http:
   #   paths:
   #   - path: /
   #     backend:
   #       serviceName: jenkins
   #       servicePort: 80
   # host: jenkins.intra.ke.com
  #tls:
  #  #- hosts:
  #    #  - jenkins.intra.ke.com
  #      #  secretName: tls-jenkins

The YAML can also be based on the official examples:
https://github.com/jenkinsci/kubernetes-plugin/tree/master/src/main/kubernetes
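
A sketch of applying the manifests above and waiting for the master to come up (filenames as used above; everything lands in the default namespace):

kubectl apply -f jenkins-master-pv.yaml -f jenkins-slave-pv.yaml -f jenkins-account.yaml -f jenkins.yaml
kubectl rollout status statefulset/jenkins   # wait until the single replica is ready
kubectl get pods,svc,ingress                 # the jenkins pod, service and ingress should all be listed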

Configure name resolution

Add an entry to your local hosts file, or point DNS, at any node IP of the k8s cluster, and Jenkins becomes reachable at jenkins.intra.ke.com.
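
For a quick test without touching DNS, a sketch using the local hosts file (the node IP is an example from this post's cluster):

echo "10.26.14.148 jenkins.intra.ke.com" >> /etc/hosts
curl -I http://jenkins.intra.ke.com/login   # expect an HTTP response from the Jenkins login page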

Install and configure the plugin for auto-scaling

Auto-scaling of jenkins-slave nodes relies on the Kubernetes plugin.

Install the plugin

Kubernetes plugin
This plugin integrates Jenkins with Kubernetes

Configure the plugin

Jenkins > Manage Jenkins > Configure System > Add a new cloud > Kubernetes
Add a Kubernetes cloud:
Name: kubernetes
Kubernetes URL: https://kubernetes.default   # our Jenkins master runs in the same cluster, so the in-cluster address is used here
Kubernetes Namespace: default
Jenkins URL: http://jenkins.default

Add a Kubernetes Pod Template:
Name: jenkins-agent
Namespace: default
Labels: jenkins-agent

Add a Container Template:
name: jnlp-slave
Docker image: jenkins/jnlp-slave:alpine
Jenkins slave root directory: /home/jenkins

Add a volume:
Type: Persistent Volume Claim
Claim name: jenkins-slave-pvc
Mount path: /home/jenkins




Disable the default build agent on the Jenkins master

Other plugins

GitLab Plugin

check out to sub_directory

Artifactory plugin


{
    "files": [
        {
            "pattern": "docker/${JOB_BASE_NAME}.tar.gz",
            "target": "ke-test-repository/${JOB_BASE_NAME}/${BUILD_NUMBER}/"

        }
    ]
}

Docker plugin

Configure Docker to listen on TCP (plain TCP without TLS is insecure):
vim /usr/lib/systemd/system/docker.service
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:4343 -H unix:///var/run/docker.sock
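
After editing the unit file, a sketch for reloading systemd and checking the TCP endpoint (the host IP is an example; again, unauthenticated TCP should only be used on a trusted network):

systemctl daemon-reload
systemctl restart docker
docker -H tcp://10.26.10.47:4343 version   # should print both client and server versions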

The Execute Docker Command build step then takes care of docker image build/pull/push.

There is another approach: on a single-node Docker host you can mount the host's Docker into the Jenkins container by adding the following options:
-v /var/run/docker.sock:/var/run/docker.sock -v $(which docker):/usr/bin/docker
It did not work for me, though, probably because the Jenkins slave base image is Alpine-based.

Kubernetes Continuous Deploy plugin

Fetches cluster credentials from the master node via SSH; they can also be configured manually.
Variable substitution in the resource configuration, allowing dynamic resource deployment.
Docker login credential management for private Docker registries.
No need to install the kubectl tool on the Jenkins slave nodes.

Workspace Cleanup Plugin

Cleans up build data to reduce disk usage.

References

https://github.com/jenkinsci/kubernetes-plugin/tree/master/src/main/kubernetes
https://blog.csdn.net/aixiaoyang168/article/details/79767649



How to configure the Kubernetes cloud's credentials for connecting to the K8S cluster: https://blog.csdn.net/mario_hao/article/details/81332546

Installing and using Ingress in a Kubernetes cluster

In Kubernetes, the IP addresses of Services and Pods are only usable inside the cluster network and are invisible to applications outside the cluster. To let external applications reach services inside the cluster, Kubernetes currently offers the following options:

  • NodePort
  • LoadBalancer
  • Ingress

Ingress consists of two parts, the Ingress Controller and the Ingress resources:

  • ingress controller
    turns newly added Ingress resources into an Nginx configuration and makes it take effect
  • ingress resources
    abstract the Nginx configuration into an Ingress object; adding a new service only requires writing a new Ingress YAML file

How Ingress works:

  • the ingress controller talks to the kubernetes api and dynamically watches for changes to the ingress rules in the cluster,
  • then reads them; a rule simply states which domain maps to which service, and from the rules a piece of nginx configuration is generated,
  • which is written into the nginx-ingress-controller pod; that pod runs an Nginx server, and the controller writes the generated nginx configuration into its /etc/nginx.conf,
  • and then reloads nginx so the configuration takes effect.
    This is how per-domain routing and dynamic updates are achieved.

What problems Ingress solves:

  • Dynamic service configuration
    In the traditional approach, adding a new service means adding a reverse-proxy entry at the traffic entry point that points at the new k8s service. With Ingress you only need to configure the service; when it starts it registers itself with Ingress automatically, with no extra steps.
  • Fewer unnecessarily exposed ports
    Anyone who has set up k8s knows the first step is to open up the firewall, mainly because many k8s services are exposed via NodePort, which punches a lot of holes in the hosts and is neither secure nor elegant. Ingress avoids this: apart from the Ingress service itself, which may need to be exposed, other services do not need to use NodePort.

This post uses the nginx-based ingress controller.

The Ingress Controller is a daemon, deployed as a Kubernetes Pod, that watches the apiserver's /ingresses endpoint for updates to the ingress configuration; its job is to enforce the Ingress rules.

Install the Ingress Controller

kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/mandatory.yaml

Verify the Ingress Controller installation

kubectl get pods --all-namespaces -l app.kubernetes.io/name=ingress-nginx --watch

Expose the Ingress controller via NodePort

service-nodeport.yaml

apiVersion: v1
kind: Service
metadata:
  name: ingress-nginx
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  type: NodePort
  ports:
    - name: http
      nodePort: 30000
      port: 80
      targetPort: 80
      protocol: TCP
    - name: https
      nodePort: 30003
      port: 443
      targetPort: 443
      protocol: TCP
  selector:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
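
A sketch for applying this service and testing it from outside the cluster (the node IP and Host header are taken from the Jenkins example earlier; any cluster node IP works):

kubectl apply -f service-nodeport.yaml
curl -I -H "Host: jenkins.intra.ke.com" http://10.26.14.148:30000/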

Expose the Ingress controller via the host network

This is done by enabling the hostNetwork option in the Pod spec.

template:
  spec:
    hostNetwork: true

The author recommends this approach.

Example
apiVersion: v1
kind: Namespace
metadata:
  name: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---

kind: ConfigMap
apiVersion: v1
metadata:
  name: nginx-configuration
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tcp-services
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
kind: ConfigMap
apiVersion: v1
metadata:
  name: udp-services
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nginx-ingress-serviceaccount
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: nginx-ingress-clusterrole
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - endpoints
      - nodes
      - pods
      - secrets
    verbs:
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - nodes
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - services
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - "extensions"
    resources:
      - ingresses
    verbs:
      - get
      - list
      - watch
  - apiGroups:
      - ""
    resources:
      - events
    verbs:
      - create
      - patch
  - apiGroups:
      - "extensions"
    resources:
      - ingresses/status
    verbs:
      - update

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: nginx-ingress-role
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
  - apiGroups:
      - ""
    resources:
      - configmaps
      - pods
      - secrets
      - namespaces
    verbs:
      - get
  - apiGroups:
      - ""
    resources:
      - configmaps
    resourceNames:
      # Defaults to "<election-id>-<ingress-class>"
      # Here: "<ingress-controller-leader>-<nginx>"
      # This has to be adapted if you change either parameter
      # when launching the nginx-ingress-controller.
      - "ingress-controller-leader-nginx"
    verbs:
      - get
      - update
  - apiGroups:
      - ""
    resources:
      - configmaps
    verbs:
      - create
  - apiGroups:
      - ""
    resources:
      - endpoints
    verbs:
      - get

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: nginx-ingress-role-nisa-binding
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nginx-ingress-role
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx

---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: nginx-ingress-clusterrole-nisa-binding
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nginx-ingress-clusterrole
subjects:
  - kind: ServiceAccount
    name: nginx-ingress-serviceaccount
    namespace: ingress-nginx

---

apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-ingress-controller
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/part-of: ingress-nginx
  template:
    metadata:
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
      annotations:
        prometheus.io/port: "10254"
        prometheus.io/scrape: "true"
    spec:
      hostNetwork: true
      serviceAccountName: nginx-ingress-serviceaccount
      containers:
        - name: nginx-ingress-controller
          image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.23.0
          args:
            - /nginx-ingress-controller
            - --configmap=$(POD_NAMESPACE)/nginx-configuration
            - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
            - --udp-services-configmap=$(POD_NAMESPACE)/udp-services
            - --publish-service=$(POD_NAMESPACE)/ingress-nginx
            - --annotations-prefix=nginx.ingress.kubernetes.io
          securityContext:
            allowPrivilegeEscalation: true
            capabilities:
              drop:
                - ALL
              add:
                - NET_BIND_SERVICE
            # www-data -> 33
            runAsUser: 33
          env:
            - name: POD_NAME
              valueFrom:
                fieldRef:
                  fieldPath: metadata.name
            - name: POD_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
          ports:
            - name: http
              containerPort: 80
            - name: https
              containerPort: 443
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            initialDelaySeconds: 10
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 10
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 10254
              scheme: HTTP
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 10

---
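
A sketch for applying the manifest above and confirming that the controller pods bind ports 80/443 directly on their nodes (the filename is an assumption; run the ss check on one of the nodes shown by the second command):

kubectl apply -f nginx-ingress-hostnetwork.yaml
kubectl -n ingress-nginx get pods -o wide        # pod IPs equal node IPs when hostNetwork is on
ss -lntp | grep -E ':(80|443) '                  # run on a node: nginx should be listening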

References

https://kubernetes.io/docs/concepts/services-networking/ingress/
https://www.jianshu.com/p/e30b06906b77
https://github.com/kubernetes/ingress-nginx/tree/master/deploy
https://github.com/kubernetes/ingress-nginx/blob/master/docs/deploy/baremetal.md
Ingress objects and their parameters: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md

Kubernetes Web UI (Dashboard)

Introduction

The Dashboard is a web-based Kubernetes user interface. You can use it to deploy containerized applications to a Kubernetes cluster, troubleshoot them, and manage cluster resources. It gives you an overview of the applications running on the cluster and lets you create or modify individual Kubernetes resources (such as Deployments, Jobs, DaemonSets, and so on). For example, you can use the deploy wizard to scale a Deployment, start a rolling update, restart a Pod, or deploy a new application.

Deploy

kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/master/aio/deploy/recommended/kubernetes-dashboard.yaml

Access

Modify the YAML:
image: registry.cn-hangzhou.aliyuncs.com/google_containers/kubernetes-dashboard-amd64:v1.10.1

1. Add port 9090 to the Deployment; the image already serves the insecure port 9090, the stock YAML just does not expose it.

2. Comment out '- --auto-generate-certificates' under the Deployment args by prefixing it with #.

3. Add a port to the Service with targetPort pointing to 9090.

4. Remember to add type: NodePort and configure a nodePort, so the dashboard can be reached via node IP + NodePort, i.e. entering <k8s-node-ip>:32000 opens the dashboard.

5. In the downloaded kubernetes-dashboard.yaml, change the RoleBinding to a ClusterRoleBinding, and change the kind and name in roleRef to use the all-powerful cluster-admin ClusterRole (a superuser role with full access to kube-apiserver), as follows:

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kubernetes-dashboard-minimal
  namespace: kube-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: kubernetes-dashboard
  namespace: kube-system
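
The token command below greps for an admin-user secret, which is not created anywhere above; a minimal sketch for creating that ServiceAccount and binding it to cluster-admin (this step is an assumption, not part of the original walkthrough):

kubectl -n kube-system create serviceaccount admin-user
kubectl create clusterrolebinding admin-user \
  --clusterrole=cluster-admin --serviceaccount=kube-system:admin-user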

TOKEN:
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
kubectl proxy --address='10.26.14.148' --disable-filter=true
http://10.26.14.148:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/

References:
https://kubernetes.io/docs/tasks/access-application-cluster/web-ui-dashboard/
https://www.jianshu.com/p/acf1e15e9200
https://blog.csdn.net/qq1083062043/article/details/84949924

Changing Docker's default Root Dir (/var/lib/docker)

Method 1

Stop the docker service: systemctl stop docker
vi /etc/docker/daemon.json
Add the option "graph": "/home/docker"
Start the docker service: systemctl start docker

Example:
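
A minimal /etc/docker/daemon.json sketch with the option from Method 1, written here as a shell heredoc (the target path is the example path used above; any existing daemon.json content would be overwritten):

systemctl stop docker
cat > /etc/docker/daemon.json <<'EOF'
{
  "graph": "/home/docker"
}
EOF
systemctl start docker
docker info | grep "Docker Root Dir"   # should now report /home/docker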

Method 2

cd /var/lib/
mv docker/* /home/docker
rm -rf docker
ln -s /home/docker/ /var/lib/docker

Running automated tasks with a CronJob in a Kubernetes cluster

Create a CronJob

kubectl create -f https://raw.githubusercontent.com/kubernetes/website/master/content/en/examples/application/job/cronjob.yaml
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "*/1 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: busybox
            args:
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernetes cluster
          restartPolicy: OnFailure

Or create it with kubectl run:

kubectl run hello --schedule="*/1 * * * *" --restart=OnFailure --image=busybox -- /bin/sh -c "date; echo Hello from the Kubernetes cluster"

Check the CronJob's status and results

kubectl get cronjob hello
kubectl get jobs --watch
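
To inspect the output of one run, a sketch along the lines of the official docs (the job name hello-1552429200 is an example; substitute one printed by kubectl get jobs):

pods=$(kubectl get pods --selector=job-name=hello-1552429200 --output=jsonpath={.items[*].metadata.name})
kubectl logs $pods   # prints the date and "Hello from the Kubernetes cluster"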

Delete the CronJob

kubectl delete cronjob hello

References:
https://kubernetes.io/docs/tasks/job/automated-tasks-with-cron-jobs/

Storing and managing sensitive information with Kubernetes Secrets

A Kubernetes Secret is an object that holds a small amount of sensitive data such as a password, a token, or a key. Putting such information directly into a pod spec or an image carries a significant security risk. To reduce the risk of leaking credentials, create a Secret and reference it (for example via secretKeyRef) in the pod definition.

Create

# Create files needed for rest of example.
$ echo -n 'admin' > ./username.txt
$ echo -n '1f2d1e2e67df' > ./password.txt

$ kubectl create secret generic db-user-pass --from-file=./username.txt --from-file=./password.txt
secret "db-user-pass" created

View

kubectl get secrets
kubectl describe secrets/db-user-pass

Use
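
A minimal sketch of consuming the secret created above as environment variables in a pod (the pod and variable names are illustrative):

kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: secret-env-demo
spec:
  restartPolicy: Never
  containers:
  - name: demo
    image: busybox
    command: ["sh", "-c", "echo db user is $DB_USER"]
    env:
    - name: DB_USER
      valueFrom:
        secretKeyRef:
          name: db-user-pass
          key: username.txt   # keys created with --from-file keep the file name
    - name: DB_PASS
      valueFrom:
        secretKeyRef:
          name: db-user-pass
          key: password.txt
EOF
kubectl logs secret-env-demo   # once the pod has completed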

Official documentation: https://kubernetes.io/docs/concepts/configuration/secret/

Installing a single-master Kubernetes 1.13 cluster with kubeadm

Prerequisites:

A RHEL/CentOS system (or another supported distribution)
2 GB or more of RAM per machine
2 or more CPUs per machine
A unique hostname, MAC address and product_uuid for every node
The required ports must be free (master: 6443, 2379-2380, 10250, 10251, 10252; nodes: 10250, 30000-32767)

Resource allocation:

Node                         IP             Services                                                                                              Role
k8s01-test.mars.ljnode.com   10.26.14.148   Kubernetes API server, etcd server client API, Kubelet API, kube-scheduler, kube-controller-manager   master
k8s02-test.mars.ljnode.com   10.26.14.233   Kubelet API, NodePort Services                                                                        node
k8s03-test.mars.ljnode.com   10.26.14.217   Kubelet API, NodePort Services                                                                        node

Machine initialization

To allow Docker to start, the kernel-module blacklist configuration on our company's Tencent Cloud machines has to be adjusted; skip this step if it does not apply to your hosts.

vim /etc/modprobe.d/blacklist.conf 
 Comment out the following lines:
#blacklist nf_conntrack 
#blacklist nf_conntrack_ipv6 
#blacklist xt_conntrack 
#blacklist nf_conntrack_ftp 
#blacklist xt_state 
#blacklist iptable_nat 
#blacklist ipt_REDIRECT 
#blacklist nf_nat 
#blacklist nf_conntrack_ipv4 
vim /etc/modprobe.d/connectiontracking.conf    (comment out everything in this file)

For Kubernetes to run well, swap needs to be turned off:

swapoff -a

Disable SELinux:

setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config

Turn off the firewall:

systemctl stop firewalld
systemctl disable firewalld

Confirm that the default policy of the FORWARD chain in the iptables filter table is ACCEPT:

[root@k8s01-test gaoyaohua001]# iptables -nvL
Chain INPUT (policy ACCEPT 0 packets, 0 bytes)
 pkts bytes target     prot opt in     out     source               destination

Chain FORWARD (policy ACCEPT 0 packets, 0 bytes)
 pkts bytes target     prot opt in     out     source               destination

Chain OUTPUT (policy ACCEPT 0 packets, 0 bytes)
 pkts bytes target     prot opt in     out     source               destination

Some users on RHEL/CentOS 7 have reported traffic being routed incorrectly because iptables is bypassed. For flannel to work correctly, you must pass --pod-network-cidr=10.244.0.0/16 to kubeadm init.
Set /proc/sys/net/bridge/bridge-nf-call-iptables to 1 by running sysctl net.bridge.bridge-nf-call-iptables=1, so that bridged IPv4 traffic is passed to the iptables chains. This is a hard requirement for some CNI plugins to work. Also confirm that the module is loaded with lsmod | grep br_netfilter.

cat <<EOF >  /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
modprobe br_netfilter
sysctl -p /etc/sysctl.d/k8s.conf

IPVS has been merged into the mainline kernel, but enabling ipvs mode for kube-proxy requires the following kernel modules to be loaded first:

ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack_ipv4
Run the following script on all Kubernetes nodes:

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

The script above creates /etc/sysconfig/modules/ipvs.modules so that the required modules are loaded automatically after a node reboot. Use lsmod | grep -e ip_vs -e nf_conntrack_ipv4 to check that they have been loaded correctly.

You also need to make sure the ipset package is installed on every node (yum install ipset). To make it easy to inspect the ipvs proxy rules, it is also worth installing the management tool ipvsadm (yum install ipvsadm).

If these prerequisites are not met, kube-proxy falls back to iptables mode even when its configuration enables ipvs mode.
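
Once the cluster is up, a sketch for actually switching kube-proxy to ipvs mode and checking the rules (this step is not part of the original post; it edits the kube-proxy ConfigMap that kubeadm creates):

kubectl -n kube-system get configmap kube-proxy -o yaml \
  | sed 's/mode: ""/mode: "ipvs"/' \
  | kubectl apply -f -
kubectl -n kube-system delete pods -l k8s-app=kube-proxy   # restart kube-proxy with the new mode
ipvsadm -Ln                                                # run on a node: ipvs rules should appear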

Install on all nodes

The CRI (Container Runtime Interface) runtime used by default is Docker.
Install docker-ce-18.06.2.ce:

#!/bin/bash
# Install Docker CE
## Set up the repository
### Install required packages.
    yum install yum-utils device-mapper-persistent-data lvm2

### Add docker repository.
yum-config-manager \
    --add-repo \
    https://download.docker.com/linux/centos/docker-ce.repo

## Install docker ce.
yum update && yum install docker-ce-18.06.2.ce

## Create /etc/docker directory.
mkdir /etc/docker

# Setup daemon.
cat > /etc/docker/daemon.json <<EOF
{
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF

mkdir -p /etc/systemd/system/docker.service.d

# Restart docker.
systemctl enable docker.service
systemctl daemon-reload
systemctl restart docker
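
To confirm the daemon picked up the systemd cgroup driver set in daemon.json above:

docker info | grep -i cgroup   # should report "Cgroup Driver: systemd"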

Install kubeadm, kubelet and kubectl:
The official Google repo has been replaced with an Aliyun mirror here; if your servers have unrestricted internet access, you can use the repo from the official docs instead.

#!/bin/bash
#kubernetes install kubelet kubeadm kubectl
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF

yum install -y kubelet kubeadm kubectl --disableexcludes=kubernetes

systemctl enable --now kubelet

When the services start they need to pull some base images hosted outside China; the steps below work around that. If your servers have unrestricted internet access, skip this step.
Run the following script:

#!/bin/bash
#kubernetes need kubelet kubeadm kubectl images

#kube-apiserver:v1.13.3
#kube-controller-manager:v1.13.3
#kube-scheduler:v1.13.3
#kube-proxy:v1.13.3
#pause:3.1
#etcd:3.2.24
#coredns:1.2.6

list=("kube-apiserver:v1.13.3" "kube-controller-manager:v1.13.3" "kube-scheduler:v1.13.3" "kube-proxy:v1.13.3" "pause:3.1" "etcd:3.2.24")
for i in ${list[@]}
do
	docker pull mirrorgooglecontainers/$i
	docker tag mirrorgooglecontainers/$i k8s.gcr.io/$i
	docker rmi mirrorgooglecontainers/$i
done

docker pull coredns/coredns:1.2.6
docker tag coredns/coredns:1.2.6 k8s.gcr.io/coredns:1.2.6
docker rmi coredns/coredns:1.2.6
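
A quick check that all the required images are now tagged locally under k8s.gcr.io:

docker images "k8s.gcr.io/*"   # should list apiserver, controller-manager, scheduler, proxy, pause, etcd and coredns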

Install the master node

Run the initialization command kubeadm init <args>.
Notes on the arguments:
For flannel to work correctly, you must pass --pod-network-cidr=10.244.0.0/16 to kubeadm init.
kubeadm uses the network interface associated with the default gateway to advertise the master's IP. To use a different network interface, specify the --apiserver-advertise-address=<ip-address> argument to kubeadm init.

kubeadm init --pod-network-cidr=10.244.0.0/16

Right after installation kubectl will not work. If you are the root user, run the command below (append it to /root/.bashrc to make it persistent):

export KUBECONFIG=/etc/kubernetes/admin.conf

To make kubectl work for a non-root user, run the following commands:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

To enable tab completion for kubectl:

echo "source <(kubectl completion bash)" >> ~/.bashrc

You must deploy a pod network add-on before any application starts, so that your pods can communicate with each other.
Only one pod network can be installed per cluster; we chose flannel when running kubeadm init above:

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/a70459be0084506e4ec919aa1c114638878db11b/Documentation/kube-flannel.yml

Once the pod network is installed, run the command below to check that the CoreDNS pods are working; as soon as the CoreDNS pods are up and running, you can go on to join your nodes.

kubectl get pods --all-namespaces

By default, for security reasons, the cluster does not schedule pods on the master. If you want to be able to schedule pods on the master, for example for a single-machine Kubernetes cluster used for development, run:

kubectl taint nodes --all node-role.kubernetes.io/master-

Install the worker nodes

 kubeadm join 10.26.14.148:6443 --token xldd1m.u1tvqxhgndng4hwq --discovery-token-ca-cert-hash sha256:52c5f7e4296b7b5a8cb438df7e18a1212b8112d102935b293d50bd6369842cf3

Log in to the master and run the command below to check; a node that shows NotReady just needs a moment:

[root@k8s01-test gaoyaohua001]# kubectl get nodes
NAME                         STATUS     ROLES    AGE     VERSION
k8s01-test.mars.ljnode.com   Ready      master   19m     v1.13.3
k8s02-test.mars.ljnode.com   Ready      <none>   4m35s   v1.13.3
k8s03-test.mars.ljnode.com   NotReady   <none>   4m16s   v1.13.3

The worker nodes have no role label; you can set a label on a node (or on the master) with the command below:

kubectl label node k8s02-test.mars.ljnode.com  node-role.kubernetes.io/node='node'

The default token is valid for 24 hours; once it expires it can no longer be used.
If it has expired, you need to create a new token before joining additional nodes:

kubeadm token create 

kubeadm token list

1tlid0.jqr1dxk3fbpmtn1l

openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'

7159754590021250eb237130e655476d32c2bde6443ad5bfbe62d45978422c50

kubeadm join 192.168.61.11:6443 --token 1tlid0.jqr1dxk3fbpmtn1l --discovery-token-ca-cert-hash sha256:7159754590021250eb237130e655476d32c2bde6443ad5bfbe62d45978422c50

References:
https://kubernetes.io/docs/setup/independent/install-kubeadm/

Deploying a single-instance MySQL in a Kubernetes cluster

MySQL here is a stateful, single-instance application, because the /var/lib/mysql directory needs persistent storage.
Before deploying the MySQL service we first create a PersistentVolumeClaim that asks for a 20Gi volume; the claim binds to an existing PersistentVolume, on which the stateful application then runs.

Create the PV and PVC

kubectl create -f https://k8s.io/examples/application/mysql/mysql-pv.yaml

kind: PersistentVolume
apiVersion: v1
metadata:
  name: mysql-pv-volume
  labels:
    type: local
spec:
  storageClassName: manual
  capacity:
    storage: 20Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/mnt/data"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: mysql-pv-claim
spec:
  storageClassName: manual
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 20Gi

kubectl get pv
kubectl get pvc

Create the MySQL Service and Deployment

kubectl create -f https://k8s.io/examples/application/mysql/mysql-deployment.yaml

apiVersion: v1
kind: Service
metadata:
  name: mysql
spec:
  ports:
  - port: 3306
  selector:
    app: mysql
  clusterIP: None
---
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
  name: mysql
spec:
  selector:
    matchLabels:
      app: mysql
  strategy:
    type: Recreate
  template:
    metadata:
      labels:
        app: mysql
    spec:
      containers:
      - image: mysql:5.6
        name: mysql
        env:
          # Use secret in real usage
        - name: MYSQL_ROOT_PASSWORD
          value: password
        ports:
        - containerPort: 3306
          name: mysql
        volumeMounts:
        - name: mysql-persistent-storage
          mountPath: /var/lib/mysql
      volumes:
      - name: mysql-persistent-storage
        persistentVolumeClaim:
          claimName: mysql-pv-claim

kubectl get pods -l app=mysql
kubectl describe deployment mysql

Connect to MySQL with a client

kubectl run -it --rm --image=mysql:5.6 --restart=Never mysql-client -- mysql -h mysql -ppassword
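
A non-interactive variant of the same check (the password is taken from the deployment above; the pod name is illustrative):

kubectl run -it --rm --image=mysql:5.6 --restart=Never mysql-client-check -- \
  mysql -h mysql -ppassword -e "SHOW DATABASES;"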

Delete the service and the storage

kubectl delete deployment,svc mysql
kubectl delete pvc mysql-pv-claim
kubectl delete pv mysql-pv-volume

References:
https://kubernetes.io/docs/tasks/run-application/run-single-instance-stateful-application/