botocore.exceptions.NoCredentialsError: Unable to locate credentials (k8s and docker)

Problem description

I am deploying a simple cronjob that pulls an image from my private repository (it runs the aws cli, boto3, and zip). The image basically runs a backup of my server and then dumps the logs into S3. With my previous python3 image this job completed perfectly. Now that I am using the latest python3 image from my repo, I get that error as soon as the job runs. I am confused about why this error appears when the setup is the same.

Here is my code:

AWS credentials, set on the host from which the cronjob is deployed:

[default]
aws_access_key_id=XXXXXXXXXXXXXX
aws_secret_access_key=YYYYYYYYYYYYYYYYYYYYYYYYYYY
region = value
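
Note that this file exists on the host only; boto3 inside the pod resolves credentials from the container's own environment and filesystem, not the host's. A minimal check of what the container actually resolves, sketched under the assumption that boto3 is importable in the job image:

import boto3
from botocore.exceptions import ClientError, NoCredentialsError

# which provider (env, shared-credentials-file, ...) supplied credentials; None means nothing was found
creds = boto3.Session().get_credentials()
print("credential provider:", getattr(creds, "method", None))

try:
    # succeeds with any valid credentials and fails fast without them
    print(boto3.client('sts').get_caller_identity()["Arn"])
except (NoCredentialsError, ClientError) as e:
    print("no usable credentials:", e)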

docker-entrypoint.sh

#! /bin/sh
PASSWORD=$(echo $PASSWORD | tr -d '\n') # added to remove extra empty line within container before printing PASSWORD 
export PASSWORD
export ENDPOINT=$ENDPOINT
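# run the backup tool: $1 is the script path from CMD, the remaining CMD args follow with ENDPOINT spliced in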
python3 $1 $2 $ENDPOINT $3 $4 $5 $6 $7
python3 /usr/src/app/backup-aws-s3.py
sleep 1200

Dockerfile

FROM myregistry.k8s/python38:latest AS compile-image

USER root

RUN set -ex
## install dependencies
RUN dnf upgrade -y && \
    dnf install -y gcc gcc-c++ ca-certificates ncurses-devel openssl-devel bzip2-devel libffi-devel

## virtualenv
ENV VIRTUAL_ENV=/opt/venv
RUN python3 -m venv $VIRTUAL_ENV
ENV PATH="$VIRTUAL_ENV/bin:$PATH"

## add and install requirements
RUN python3 -m pip install --upgrade pip && python3 -m pip install pip-tools moto
COPY ./requirements.in .
RUN pip-compile requirements.in > requirements.txt && pip-sync
RUN python3 -m pip install -r requirements.txt

## runtime-image
FROM myregistry.k8s/python38:latest AS runtime-image

USER root

RUN set -ex
## install netcat
RUN dnf upgrade -y && \
    dnf install -y nc gcc gcc-c++ ca-certificates ncurses-devel openssl-devel bzip2-devel libffi-devel
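## note: a "--user" install as root lands in /root/.local, which the later non-root user may not be able to read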
RUN python3 -m pip install --upgrade pip && python3 -m pip install awscli --upgrade --user
RUN python3 -m pip install boto3 moto urllib3

## copy Python dependencies from build image
COPY --from=compile-image /opt/venv /opt/venv

## set working directory
WORKDIR /usr/src/app

## add user and group
RUN groupadd --system user && adduser --system user --no-create-home --gid user
RUN chown -R user:user /usr/src/app && chmod -R 777 /usr/src/app

## add app
COPY src/* /usr/src/app/
COPY ./docker-entrypoint.sh /
RUN chmod +x /docker-entrypoint.sh

## switch to non-root user
USER user

## set environment variables
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1
ENV PATH="/opt/venv/bin:$PATH"

## run service
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["/usr/src/app/backup.pyz", "--backup-all", "--username", "backup", "--no-verify", "--destination", "/usr/src/app"]

backup-aws-s3.py

#!/usr/bin/env python

import logging
import os, string
import datetime, time
from shutil import rmtree
import glob
import boto3
from botocore.exceptions import ClientError

""" functor hook that delegates the specific hook operation for all functions to avoid code duping """
def range_dir_and_do (functor, src_dir, except_err):
    glob_dir = glob.glob(src_dir, recursive=True)
    for dir_ent in glob_dir:
        try:
            return functor(dir_ent)
        except OSError:
            print(except_err)
    
    
""" functor to change appgate generated receipt directory with encoded datetime stamp for S3 """
def datetime_dir (src_dir):
    datetimestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    src_dirstr = src_dir.rsplit('_', 1)[0]
    dirpath = os.path.join(src_dirstr, datetimestamp).replace('/', '_')  # os.path.join() inserts '/' by default
    # rename the directory now
    os.rename(src_dir, dirpath)
    return dirpath, datetimestamp

    
""" change the directory to a dateime directory name for S3 """    
def datetime_appgate_dir(functor, src_dir, except_err):
    return range_dir_and_do(functor, src_dir, except_err)
    
""" delete the directory after uploading to S3 """
def del_appgate_dir (functor, src_dir, except_err):
    return range_dir_and_do(functor, src_dir, except_err)
            

def s3_upload_file(s3_file_name, s3_storage_loc, s3_file_key_name=None):
      
    # If S3 s3_file_key_name was not specified, use file_name
    if s3_file_key_name is None:
        s3_file_key_name = s3_file_name

    # Upload the file
    s3_client = boto3.client('s3', region_name='region') # masked region
    response = False
    try:
        response = s3_client.upload_file(s3_file_name, s3_storage_loc, s3_file_key_name)
    except ClientError as e:
        logging.error(e)
        return response
    response = True
    return response


""" upload local directory to s3 pail path in AWS """
def s3_upload_directory(path, s3_pail_path):
    s3_client = boto3.client('s3', region_name='region')  # masked region
    s3_uploaded = False
    try:
        for root, dir_ents, files in os.walk(path):
            for file in files: 
                s3_uploaded = s3_client.upload_file(os.path.join(root, file), s3_pail_path, file)
    except ClientError as e:
        logging.error(e)
        return s3_uploaded
    s3_uploaded = True
    return s3_uploaded


def s3_upload_full_directory(src_dirpath, s3_pail_path):
    s3_bucket = s3_pail_path
    folder = src_dirpath

    key_name = folder + '/' 
    s3_connect = boto3.client('s3', region_name='region')  # masked region

    # upload File to S3
    for filename in os.listdir(folder):
        file_key_name = folder + '/' + filename
        local_path = os.getcwd()
        local_name = local_path + '/' + key_name + filename
        upload = s3_connect.upload_file(local_name, s3_bucket, file_key_name)
        
s3_upload_pail_path = 'my_bucket' # bucket name
s3_upload_dir_pattern = '/usr/src/app/backup_*' # generated backup directory


# call 1
dirpath, datetimestamp = datetime_appgate_dir(datetime_dir, s3_upload_dir_pattern, "error joining directory paths")
# call 2
s3_upload_full_directory(dirpath, s3_upload_pail_path)
# call 3
del_appgate_dir(rmtree, s3_upload_dir_pattern, "error deleting directory")
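
For reference, boto3 also reads the standard AWS_* environment variables, so the S3 client can be constructed without any credentials file inside the container. A hedged sketch of that variant (the three variables are assumed to be injected by the pod spec; boto3 would pick them up even if they were not passed explicitly):

import os
import boto3

# all three names are standard AWS SDK environment variables
s3_client = boto3.client(
    's3',
    region_name=os.environ.get('AWS_DEFAULT_REGION'),
    aws_access_key_id=os.environ.get('AWS_ACCESS_KEY_ID'),
    aws_secret_access_key=os.environ.get('AWS_SECRET_ACCESS_KEY'),
)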

Helm chart values

jobs:
    - name: backup-cron
      securityContext:
        runAsUser: 65534
        runAsGroup: 65534
        fsGroup: 65534
      image:
        repository: myregistry.k8s/python38
        tag: backup
        imagePullPolicy: Always
      
      imagePullSecrets: backup-cron # secret to pull image
      
      env:
        - name: PASSWORD
          valueFrom:
            secretKeyRef:
              name: touch
              key: touch

        - name: ENDPOINT
          value: "https://00.000.00.00:000"
  
      schedule: "27 22 * * *"  # update test time
      failedJobsHistoryLimit: 1
      successfulJobsHistoryLimit: 3
      concurrencyPolicy: Forbid
      restartPolicy: Never

Tags: docker, amazon-s3, kubernetes, boto3

Solution
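
boto3 never sees the host's ~/.aws/credentials: its provider chain inside the pod checks explicit client arguments, the AWS_* environment variables, the shared credentials file under the container user's HOME, and finally instance/role metadata. In this setup none of those is populated: the credentials file only exists on the deploy host, the pod spec injects only PASSWORD and ENDPOINT, and the job runs as UID 65534, so even a credentials file baked into the image under /root/.aws would be out of reach. That would also explain why the old image worked if it happened to carry its own credentials file. This is the likely cause; the fix below is a common pattern, not the only one.

Store the key pair in a Kubernetes secret and inject it as environment variables, which boto3 picks up with no code changes. The secret name aws-backup-credentials and its keys are placeholders chosen for illustration:

      env:
        - name: AWS_ACCESS_KEY_ID
          valueFrom:
            secretKeyRef:
              name: aws-backup-credentials   # hypothetical secret name
              key: aws_access_key_id
        - name: AWS_SECRET_ACCESS_KEY
          valueFrom:
            secretKeyRef:
              name: aws-backup-credentials
              key: aws_secret_access_key
        - name: AWS_DEFAULT_REGION
          value: "value"                     # same masked region as the credentials file

With these entries added alongside the existing PASSWORD and ENDPOINT variables, boto3.client('s3', ...) in backup-aws-s3.py should resolve credentials from the environment instead of raising NoCredentialsError.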

