From 1be5d1a67259d1d70c80861ddba7fd02ec3da31b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Hannes=20K=C3=B6rber?=
Date: Mon, 27 May 2019 19:33:34 +0200
Subject: [PATCH] Add fix for s3 put chunk size limit

---
 backup.sh | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/backup.sh b/backup.sh
index cbd25d2..72967ae 100755
--- a/backup.sh
+++ b/backup.sh
@@ -75,6 +75,13 @@ timestamp="$(date --utc -Iseconds)"
         | aws \
             s3 cp \
             --storage-class DEEP_ARCHIVE \
+            --expected-size=$((5 * 1000 * 1000 * 1000 * 1000)) \
             - \
             "s3://${bucket}/${name}-${timestamp}/${filepath##+(/)}.tar.gz.gpg"
+        # --expected-size: specify max object size of 5TB here. This makes
+        # aws-cli use a bigger chunk size for the multipart upload. Otherwise,
+        # objects >5GB cannot be uploaded because the max chunk count is 1e4
+        #
+        # It's very hacky because it makes uploads of small files more
+        # inefficient
     done