

; Encrypted overlay remote: wraps the "crypt" path on the SFTP backend
; defined in [my.sftp.remote] below.
[crypt_my.sftp.remote]
type = crypt
; NOTE(review): suffix is normally only used with filename_encryption = off;
; "none" disables it — confirm against rclone crypt docs
suffix = none
directory_name_encryption = true
; base32 is rclone's default crypt filename encoding
filename_encoding = base32
filename_encryption = standard
; XXX = redacted; rclone stores crypt passwords obscured, not in plain text
password = XXX
; underlying storage: the "crypt" directory of [my.sftp.remote]
remote = my.sftp.remote:crypt
; Encrypted overlay remote backed by a local filesystem directory.
[crypt_local]
type = crypt
; NOTE(review): suffix is normally only used with filename_encryption = off;
; "none" disables it — confirm against rclone crypt docs
suffix = none
directory_name_encryption = true
; base32 is rclone's default crypt filename encoding
filename_encoding = base32
filename_encryption = standard
; XXX = redacted; rclone stores crypt passwords obscured, not in plain text
password = XXX
; underlying storage: local path (no wrapped remote)
remote = /Users/me/crypt_local
; SFTP backend; used as the underlying storage for [crypt_my.sftp.remote].
[my.sftp.remote]
type = sftp
; XXX = redacted inline private key (PEM)
key_pem = XXX
; remote shell command rclone runs to compute MD5 checksums
md5sum_command = md5sum
port = 22
; remote shell command rclone runs to compute SHA-1 checksums
sha1sum_command = sha1 -r
shell_type = unix
; XXX = redacted credentials / hostname
user = XXX
host = XXX
; S3-compatible backend (Kapsa endpoint); used as the underlying storage
; for [s3drive_enc_username] below.
[s3drive_auto_XXX]
type = s3
; don't attempt to check for / create the bucket before uploading
no_check_bucket = true
provider = Other
; number of file transfers to run in parallel
transfers = 4
; XXX = redacted credential
access_key_id = XXX
; chunk size for multipart uploads
chunk_size = 10.49M
endpoint = https://storage.kapsa.io
region = us-east-1
; XXX = redacted credential
secret_access_key = XXX
; number of chunks uploaded concurrently per multipart upload
upload_concurrency = 5
; files larger than this cutoff are uploaded as multipart
upload_cutoff = 220.20M
; Encrypted overlay remote: wraps a bucket on the S3 backend
; defined in [s3drive_auto_XXX] above.
[s3drive_enc_username]
type = crypt
directory_name_encryption = true
; base64 here, unlike the base32 used by the two crypt remotes above
filename_encoding = base64
filename_encryption = standard
; XXX = redacted; rclone stores crypt passwords obscured, not in plain text
password = XXX
; underlying storage: "bucket" on [s3drive_auto_XXX]
remote = s3drive_auto_XXX:bucket
; NOTE(review): suffix is normally only used with filename_encryption = off;
; "none" disables it — confirm against rclone crypt docs
suffix = none
Let me know if I can provide any more info.
Try copying (downloading) from the encrypted s3Drive storage remote, then keep the app running as usual and monitor RAM.
If it's all good, then you could pause this job and unpause the second one, and so on.
Obviously, if you don't have the time or willingness to run a couple more tests, that's fine — we'll need to reproduce this on our end and trace where the memory leak is.
Thanks! (edited)



In the 1.16.2+1 release we've improved the app's memory usage in the area of multipart uploads, but also freed up memory for Sync (and other Rclone) actions.
This might improve the situation and/or reduce the rate at which memory creeps upwards, although I am still not quite sure whether this is going to address the "explosions to 84GB", which I believe might be caused by some other malfunction.
I would appreciate it if you could give it a go and let me know if you've observed any positive change.
Thanks!