Background

We are moving to a new Kubernetes cluster; the old cluster is no longer needed, so Jenkins has to be migrated.

Installing and deploying Jenkins on k8s

Jenkins k8s YAML file

apiVersion: apps/v1
kind: Deployment
metadata:
  annotations:
  labels:
    k8s-app: jenkins
  name: jenkins
  namespace: ci
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: jenkins
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      annotations:
        redeploy-timestamp: '1639970531265'
      labels:
        k8s-app: jenkins
    spec:
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
              - matchExpressions:
                  - key: group
                    operator: In
                    values:
                      - d
      containers:
        - env:
            - name: JAVA_OPTS
              value: >-
                -Dio.jenkins.plugins.casc.ConfigurationAsCode.initialDelay=9000
                -Duser.timezone=Asia/Shanghai
                -Dhudson.model.DirectoryBrowserSupport.CSP="script-src
                'unsafe-inline'"
          image: 'jenkins/jenkins:2.277.3-lts'
          imagePullPolicy: Always
          name: jenkins
          resources:
            limits:
              cpu: '1'
              memory: 4Gi
            requests:
              cpu: '1'
              memory: 2Gi
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          volumeMounts:
            - mountPath: /var/jenkins_home
              name: home
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      serviceAccount: jenkins
      serviceAccountName: jenkins
      terminationGracePeriodSeconds: 30
      tolerations:
        - effect: NoSchedule
          key: devops
          operator: Equal
          value: run
      volumes:
        - name: home
          persistentVolumeClaim:
            claimName: jenkins
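
The Deployment above assumes that a ServiceAccount named jenkins and a PVC named jenkins already exist in the ci namespace. Below is a minimal sketch of the ServiceAccount plus the namespace-scoped RBAC the Kubernetes plugin typically needs to spawn agent pods; the exact verbs and resources are an assumption and should be tightened to your own policy (the jenkins PVC itself is sketched in the Troubleshooting section at the end).

apiVersion: v1
kind: ServiceAccount
metadata:
  name: jenkins
  namespace: ci
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: jenkins
  namespace: ci
rules:
  # the Kubernetes plugin creates, watches and deletes agent pods in this namespace
  - apiGroups: [""]
    resources: ["pods", "pods/exec", "pods/log"]
    verbs: ["get", "list", "watch", "create", "delete"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: jenkins
  namespace: ci
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: jenkins
subjects:
  - kind: ServiceAccount
    name: jenkins
    namespace: ci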

Jenkins pipeline file and related issues

  1. The pipeline file: after the migration the Docker build stage no longer runs, because the new cluster does not use Docker as the container runtime. I tried replacing the /var/run/docker.sock mount with the containerd equivalent, but without success.
  2. Maven: a maven-repo PVC has to be created so that the agent pod can start (a sketch of the claim follows the pipeline below).
  3. I would also like to run this agent pod on virtual (virtual-kubelet) nodes.
  4. Kubernetes plugin: under Configure Clouds, configure the cluster information and test the connection; the pod templates can also be defined there, and the connection token has to be added to the configuration (a Configuration as Code sketch follows the pipeline below).
    pipeline {
        agent {
            kubernetes {
                label "mypod-${UUID.randomUUID().toString()}"
                defaultContainer 'kubectl'
                yaml """
                apiVersion: v1
                kind: Pod
                metadata:
                  name: jenkins-agents
                spec:
                  nodeSelector:
                    group: d
                  containers:
                  - name: git
                    image: alpine/git
                    command:
                    - cat
                    tty: true
                  - name: maven
                    image: registry.cn-shenzhen.aliyuncs.com/xxx/maven:xxx
                    command:
                    - cat
                    tty: true
                    volumeMounts:
                    - mountPath: /usr/share/maven/ref
                      name: maven-repo
                      readOnly: false
                  - name: docker
                    image: docker:17.12.0-ce-dind
                    # privileged belongs under securityContext, not directly on the container
                    securityContext:
                      privileged: true
                    command:
                    - cat
                    tty: true
                    volumeMounts:
                    - mountPath: /var/run/docker.sock
                      name: docker-sock
                  - name: kubectl
                    image: registry.cn-shenzhen.aliyuncs.com/xxx/kubectl:v1.10.5
                    command:
                    - cat
                    tty: true
                  volumes:
                  - name: maven-repo
                    persistentVolumeClaim:
                      claimName: maven-repo
                  - name: docker-sock
                    hostPath:
                      path: /var/run/docker.sock
                """
            }
        }
        stages {
            stage('编译测试') {
                stages {
                    stage('Git 拉取代码') {
                        steps {
                            container('git') {
                                git credentialsId: 'gitlab-ssh', url: ''
                            }
                        }
                    }
                    stage('Maven 编译打包') {
                        steps {
                            container('maven') {
                                sh 'mvn clean package -Dautoconfig.skip=true -Dmaven.test.skip=true -Dmaven.test.failure.ignore=true'
                            }
                        }
                    }
                }
            }

            stage('构建发布') {
                environment {
                    dockerNamespace = 'xxx'
                    project = 'myproject'
                    kubernetesNamespace = 'b'

                    dockerRegistry = 'xxx-registry-vpc.cn-hangzhou.cr.aliyuncs.com'
                    tag = "${BUILD_TIMESTAMP}-${env.BUILD_NUMBER}"
                    image = "${dockerRegistry}/${dockerNamespace}/${project}:${tag}"
                    apiServer = 'https://192.168.0.x:6443'
                    dockerUsername = credentials('docker-username')
                    dockerPassword = credentials('docker-password')
                }
                stages {
                    stage('构建 Docker 镜像') {
                        steps {
                            container('docker') {
                                sh "docker login -u${dockerUsername} -p${dockerPassword} ${dockerRegistry}"
                                sh "docker build -t ${image} ."
                                sh "docker push ${image}"
                            }
                        }
                    }
                    stage('发布到 k8s') {
                        steps {
                            container('kubectl') {
                                withKubeConfig(caCertificate: '', credentialsId: 'kubernetes-token', serverUrl: "${apiServer}") {
                                    sh "kubectl set image deployment/${project} ${project}=${image} -n ${kubernetesNamespace} --record=true"
                                    sh "kubectl get po -o wide -n ${kubernetesNamespace}"
                                }
                            }
                        }
                    }
                }
            }
        }
        post {
            always {
                echo 'One way or another, I have finished'
            }
            success {
                dingtalk (
                    robot: 'jenkins-build',
                    type: 'ACTION_CARD',
                    atAll: false,
                    title: "构建成功:${env.JOB_NAME}",
                    messageUrl: "${env.JOB_URL}",
                    text: [
                        "### [${env.JOB_NAME}](${env.JOB_URL}) ",
                        "- 任务:[${currentBuild.displayName}](${env.BUILD_URL})",
                        '- 状态:<font color=#00FF00 >成功</font>',
                        "- 持续时间:${currentBuild.durationString}".split("and counting")[0],
                        "- 执行人:${currentBuild.buildCauses.shortDescription}",
                    ]
                )
            }
            unstable {
                echo 'I am unstable :/'
            }
            failure {
                dingtalk (
                    robot: 'jenkins-build',
                    type: 'ACTION_CARD',
                    atAll: false,
                    title: "构建失败:${env.JOB_NAME}",
                    messageUrl: 'xxxx',
                    text: [
                        "### [${env.JOB_NAME}](${env.JOB_URL}) ",
                        "- 任务:[${currentBuild.displayName}](${env.BUILD_URL})",
                        '- 状态:<font color=#EE0000 >失败</font>',
                        "- 持续时间:${currentBuild.durationString}".split("and counting")[0],
                        "- 执行人:${currentBuild.buildCauses.shortDescription}",
                    ]
                )
            }
            changed {
                echo 'Things were different before...'
            }
        }
    }
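
The pod template above mounts a PersistentVolumeClaim named maven-repo as the shared local Maven repository (issue 2 in the list before the pipeline). A minimal sketch of that claim, assuming the agent pods run in the ci namespace; the size and storageClassName are placeholders and must match your cluster:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: maven-repo
  namespace: ci                    # namespace the agent pods are scheduled into (assumption)
spec:
  accessModes:
    - ReadWriteMany                # shared by concurrent agent pods (assumption)
  storageClassName: alicloud-nas   # placeholder storage class
  resources:
    requests:
      storage: 20Gi                # placeholder size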

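For issue 4 before the pipeline: the Configure Clouds settings can also be kept as a Configuration as Code file (the Deployment already passes -Dio.jenkins.plugins.casc.ConfigurationAsCode.initialDelay, so CasC is in use). A rough sketch of the kubernetes cloud entry; the server URL, Jenkins URL, and the kubernetes-token credential ID are placeholders/assumptions:

jenkins:
  clouds:
    - kubernetes:
        name: "kubernetes"
        serverUrl: "https://192.168.0.x:6443"                   # new cluster API server (placeholder)
        namespace: "ci"                                          # namespace for agent pods (assumption)
        credentialsId: "kubernetes-token"                        # ServiceAccount token stored as a Jenkins credential
        jenkinsUrl: "http://jenkins.ci.svc.cluster.local:8080"   # assumed in-cluster Service address
        skipTlsVerify: true
        containerCapStr: "10"                                    # cap on concurrent agent pods
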
Troubleshooting

  1. The data set to migrate was large, mainly because Discard old builds had never taken effect, so the jobs directory occupied a huge amount of disk. The Configuration Slicing plugin can change the retention settings for all jobs in bulk; Jenkins applies them on the next build or during its periodic housekeeping. Keeping about 15 days of builds should be enough, after which the disk usage becomes small. You can also delete $JENKINS_HOME/jobs/[JOB_NAME]/builds by hand.
    Discard Old Builds Slicer - Days to keep artifacts
    Discard Old Builds Slicer - Days to keep builds
    Discard Old Builds Slicer - Max # of builds to keep
    Discard Old Builds Slicer - Max # of builds to keep with artifacts
  2. Copy the old Jenkins home directory and point the NAS volume at the copy (see the PV/PVC sketch after this list).
  3. The Jenkins version was not upgraded; upgrading seemed to cause problems, and compatibility with the existing setup had to be kept.
  4. The new cluster did not use Docker as the container runtime, so docker login and docker push failed; the cluster was later switched back to the Docker runtime. From what I read, crictl and ctr should be able to do the equivalent on containerd, but I have not tried it (aliyun).
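
A rough sketch of what item 2 above describes: a statically provisioned PersistentVolume backed by the NAS share that holds the copied Jenkins home, bound to the jenkins claim the Deployment mounts. The NAS mount target, path, and size are placeholders:

apiVersion: v1
kind: PersistentVolume
metadata:
  name: jenkins-home
spec:
  capacity:
    storage: 200Gi                               # placeholder size
  accessModes:
    - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain          # keep the data even if the claim is deleted
  nfs:
    server: xxx.cn-hangzhou.nas.aliyuncs.com     # placeholder NAS mount target
    path: /jenkins_home                          # placeholder path holding the copied home directory
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: jenkins
  namespace: ci
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: ""                           # empty so the claim binds to the static PV above
  volumeName: jenkins-home
  resources:
    requests:
      storage: 200Gi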