Add fix for s3 put chunk size limit

This commit is contained in:
Hannes Körber
2019-05-27 19:33:34 +02:00
parent de4caeea7e
commit 1be5d1a672

View File

@@ -75,6 +75,13 @@ timestamp="$(date --utc -Iseconds)"
| aws \
    s3 cp \
    --storage-class DEEP_ARCHIVE \
# specify max object size of 5TB here. This makes aws-cli use a bigger
# chunk size for the multipart upload. Otherwise, objects >5GB cannot
# be uploaded because the max chunk count is 1e4
#
# It's very hacky because it makes uploads of small files more
# inefficient
    --expected-size=$((5 * 1000 * 1000 * 1000 * 1000)) \
    - \
    "s3://${bucket}/${name}-${timestamp}/${filepath##+(/)}.tar.gz.gpg"
done