cancel
Showing results for 
Show  only  | Search instead for 
Did you mean: 
Announcements
Want to know more about how you can find anything and protect everything? Check it out here.

Dropbox API Support & Feedback

Find help with the Dropbox API from other developers.

cancel
Showing results for 
Show  only  | Search instead for 
Did you mean: 

System.Threading.Tasks.TaskCanceledException: A task was canceled.

System.Threading.Tasks.TaskCanceledException: A task was canceled.

Engg
Explorer | Level 3
Go to solution

We have 500 machine which upload file at same time. We are getting RateLimitException. We are doing a retry after waiting rateLimitException.ErrorResponse.RetryAfter seconds.

to test this. I came up with a console app.
Following is my code

public async Task UploadDBparallelly(List<string> dbLocalFilesPath)
{


List<Task> tasks = new List<Task>();

foreach (var item in dbLocalFilesPath.Select((value, i) => new { i, value }))
{

string dbLocalPath = item.value;
int index = item.i;
Logger.Instance.LogInfo($"dbLocalPath: {dbLocalPath}");
tasks.Add(Task.Run(async () =>
{
try
{

//var uploadPathWithFileName = GetFilePathWithFileName(dbLocalPath);

// Your console app logic goes here
await Upload(dbLocalPath, GetFilePathWithFileName(dbLocalPath));
}
catch (Exception ex)
{
Logger.Instance.LogError($"Error Exception {ex}");

}
}));
}
try
{
await Task.WhenAll(tasks);
}
catch (Exception ex)
{
// Handle any exceptions thrown by tasks here
Logger.Instance.LogError($"Error Exception {ex}");
}
}

public static async Task Upload(string localPath, string remotePath)
{
try
{
string accessToken = ConfigurationManager.AppSettings["AccessToken"];
using (DropboxClient client = new DropboxClient(accessToken))
{

using (FileStream fileStream = System.IO.File.Open(localPath, FileMode.Open))
{
//Converting bytes to Megabyte
long fileSize = fileStream.Length / 1024 / 1024;
Logger.Instance.LogInfo($"Total File Size of backup file : {fileSize} MB");
//File Should be under 150MB
if (fileSize <= 150)
{
Logger.Instance.LogDebug("started direct upload backup to dropbox");
FilesUserRoutes files = client.Files;
string path = remotePath;
Stream stream = (Stream)fileStream;
DateTime? clientModified = new DateTime?();
Stream body = stream;
FileMetadata fileMetadata = await files.UploadAsync(path, clientModified: clientModified, body: body);
}
else
{

Logger.Instance.LogDebug("started chunk upload backup to dropbox");
const int chunkSize = 4096 * 1024;

await ChunkUpload(remotePath, fileStream, chunkSize, client, localPath);
}
}
}

}
catch (DropboxException dropboxException)
{

if (dropboxException.GetType().Name == typeof(RateLimitException).Name)
{
var rateLimitException = dropboxException as RateLimitException;
Logger.Instance.LogError($"Upload: Rate limit exceeded. Waiting before retrying...");

Logger.Instance.LogError($"Reason: {rateLimitException.ErrorResponse.Reason}: RetryAfter {rateLimitException.ErrorResponse.RetryAfter}");
var backoff = TimeSpan.FromSeconds(rateLimitException.ErrorResponse.RetryAfter);
Thread.Sleep(Convert.ToInt32(rateLimitException.ErrorResponse.RetryAfter) * 1000);
await Upload(localPath, remotePath); // Retry after delay
}
else
{
Logger.Instance.LogError($"dropboxException {dropboxException}");
}
}
catch (Exception ex)
{
Logger.Instance.LogError("Error in upload backup to dropbox", ex);
throw ex;
}
}

private static async Task ChunkUpload(string remotePath, FileStream fileStream, int chunkSize, DropboxClient client, string localPath)
{
try
{
Console.WriteLine($"Chunk upload file...{localPath}");

 

int numChunks = (int)Math.Ceiling((double)fileStream.Length / chunkSize);
Console.WriteLine($"SnumChunks {numChunks} ");
byte[] buffer = new byte[chunkSize];
string sessionId = null;

for (var idx = 0; idx < numChunks; idx++)
{
Console.WriteLine($"Start uploading chunk {idx} , LocalPath {localPath} ");
var byteRead = fileStream.Read(buffer, 0, chunkSize);

using (MemoryStream memStream = new MemoryStream(buffer, 0, byteRead))
{
if (idx == 0)
{
var result = client.Files.UploadSessionStartAsync(body: memStream).GetAwaiter().GetResult();
sessionId = result.SessionId;
}

else
{
UploadSessionCursor cursor = new UploadSessionCursor(sessionId, (ulong)(chunkSize * idx));

if (idx == numChunks - 1)
{
Logger.Instance.LogInfo($"DONE FOR{localPath}");
await client.Files.UploadSessionFinishAsync(cursor: cursor, commit: new CommitInfo(remotePath), body: memStream);
}

else
{
client.Files.UploadSessionAppendV2Async(cursor, body: memStream).GetAwaiter().GetResult();
}
}
}
}

}

catch (DropboxException dropboxException)
{
fileStream.Dispose();
client.Dispose();

if (dropboxException.GetType().Name == typeof(RateLimitException).Name)
{
var rateLimitException = dropboxException as RateLimitException;
Logger.Instance.LogError($"ChunkUpload: Rate limit exceeded. Waiting before retrying... RETRY FOR{localPath}");

Logger.Instance.LogError($"Reason: {rateLimitException.ErrorResponse.Reason}: RetryAfter {rateLimitException.ErrorResponse.RetryAfter}");
var backoff = TimeSpan.FromSeconds(rateLimitException.ErrorResponse.RetryAfter);
await Task.Delay(Convert.ToInt32(rateLimitException.ErrorResponse.RetryAfter) * 1000);
await Upload(localPath, remotePath); // Retry after delay
}
else
{
Logger.Instance.LogError($"dropboxException {dropboxException}");
}
}

catch (Exception ex)
{
Logger.Instance.LogError($"Error in chunk upload process {remotePath}", ex);
}
finally
{
fileStream.Dispose();
client.Dispose();
}
}

I'm getting following exception. What I might be missing?



System.Threading.Tasks.TaskCanceledException: A task was canceled.
at System.Runtime.CompilerServices.TaskAwaiter.ThrowForNonSuccess(Task task)
at System.Runtime.CompilerServices.TaskAwaiter.HandleNonSuccessAndDebuggerNotification(Task task)
at Dropbox.Api.DropboxRequestHandler.<RequestJsonString>d__2f.MoveNext()
--- End of stack trace from previous location where exception was thrown ---
at System.Runtime.ExceptionServices.ExceptionDispatchInfo.Throw()
at System.Runtime.CompilerServices.TaskAwaiter.ThrowForNonSuccess(Task task)
at Dropbox.Api.DropboxRequestHandler.<RequestJsonStringWithRetry>d__1a.MoveNext()
--- End of stack trace from previous location where exception was thrown ---
at System.Runtime.ExceptionServices.ExceptionDispatchInfo.Throw()
at System.Runtime.CompilerServices.TaskAwaiter.ThrowForNonSuccess(Task task)
at System.Runtime.CompilerServices.TaskAwaiter.HandleNonSuccessAndDebuggerNotification(Task task)
at Dropbox.Api.DropboxRequestHandler.<Dropbox.Api.Stone.ITransport.SendUploadRequestAsync>d__d`3.MoveNext()
--- End of stack trace from previous location where exception was thrown ---
at System.Runtime.ExceptionServices.ExceptionDispatchInfo.Throw()
at System.Runtime.CompilerServices.TaskAwaiter.ThrowForNonSuccess(Task task)
at System.Runtime.CompilerServices.TaskAwaiter.HandleNonSuccessAndDebuggerNotification(Task task)
at System.Runtime.CompilerServices.TaskAwaiter`1.GetResult()
at Odin.Compute.OdinDatabaseBackupManager.DropboxUploadTest.<ChunkUpload>d__9.MoveNext() in C:\Work\AWC\AWC TFS\GIT\The ONE Platform\Source\Odin.Compute.OdinDatabaseBackupManager\DropboxUploadTest.cs:line 179

1 Accepted Solution

Accepted Solutions

Здравко
Legendary | Level 20

In general modifying the folder should not affect probability for exception appearing, but cannot prevent it. Some decrement may happen if different files/folders appear in different namespaces (namespaces are locked independently, so less likely compete), but better don't rely on.

Again, try to schedule the uploads in non overlap way and handle the possible exception in proper way. Example of such exception handling (again just example and far from optimal) follows:

private readonly int retryLimit = 20;

...

var backoff = 0;
var rand = new Random();
var commitInfo = new CommitInfo(filePath.Substring(filePath.LastIndexOf('/')),
    WriteMode.Overwrite.Instance, false);
for (var i = 0; i < retryLimit && !string.IsNullOrEmpty(sessionId); ++i)
{
    if (backoff != 0)
    {
        Console.WriteLine("Ограничение при завършване на файл {0}. Нов опит след {1} милисекунди...",
            commitInfo.Path, backoff);
        Thread.Sleep(backoff);
    }
    lastCursor = new UploadSessionCursor(sessionId, (ulong)fileStream.Length);
    memStream = new MemoryStream(buffer, 0, 0);
    try
    {
        await dbx.Files.UploadSessionFinishAsync(lastCursor, commitInfo, body: memStream);
        sessionId = string.Empty;
    }
    catch (RateLimitException rateLimit)
    {
        backoff = (int)(backoff * 1.1);
        backoff = Math.Max(backoff, rateLimit.RetryAfter * 1000);
        backoff = Math.Max(backoff, 1000);
        backoff += rand.Next(500);
    }
}
if (string.IsNullOrEmpty(sessionId))
{
    Console.WriteLine("Качването на {0} приключи \ud83d\ude09.", commitInfo.Path);
}
else
{
    Console.WriteLine("Неуспешно завършване на файл {0} след {1} опита \ud83d\ude15!",
        commitInfo.Path, retryLimit);
}

The above just shows how my advices (from previous page) can be implemented. Here sessionId represents a session ready for finishing (i.e. content uploaded already).

View solution in original post

11 Replies 11

Здравко
Legendary | Level 20
Go to solution

Hi @Engg,

It's better to trace the state for figuring out why your code falls to such an exception. To do it better, avoid indefinite recursions (your code can fall very easy in relatively deep recursion - something that's not a good practice). Whenever possible use iterations instead of recursions (in the particular case recursion usage is meaningless - only makes the things more complex to debug). Why at all are you using recursion? 🤷

About rate limiting, this is because you make too many calls that change the target namespace state simultaneous. Every call that add/change/delete something in particular namespace folder tree perform such a change. You can easy decrease this number by upload all (or in groups) you files in batch(es). You can upload many files without change anything in target namespace folder tree and once entire group is uploaded (but still invisible) finish upload for all files at once (so just one change in the folders tree for all files). So you will be able add your files in one (or few - depending on total files count) step(s) that will decrease probability for rate limit error to negligible. 😉

Hope this helps.

Engg
Explorer | Level 3
Go to solution

In my case I have 500 machine and in each machine we have window service which job is upload the file to dropbox. All the 500 machine uploading files to dropbox at same time(Morning). How to huddle this. How to decrease this number by upload in this case.

Most of the system the uploading is getting failed. I'm getting following exception.

Dropbox.Api.RateLimitException: too_many_requests/...
   at Dropbox.Api.DropboxRequestHandler.<RequestJsonString>d__2f.MoveNext()
--- End of stack trace from previous location where exception was thrown ---
   at System.Runtime.ExceptionServices.ExceptionDispatchInfo.Throw()
   at System.Runtime.CompilerServices.TaskAwaiter.HandleNonSuccessAndDebuggerNotification(Task task)
   at Dropbox.Api.DropboxRequestHandler.<RequestJsonStringWithRetry>d__1a.MoveNext()
--- End of stack trace from previous location where exception was thrown ---
   at System.Runtime.ExceptionServices.ExceptionDispatchInfo.Throw()
   at System.Runtime.CompilerServices.TaskAwaiter.HandleNonSuccessAndDebuggerNotification(Task task)
   at Dropbox.Api.DropboxRequestHandler.<Dropbox.Api.Stone.ITransport.SendUploadRequestAsync>d__d`3.MoveNext()
--- End of stack trace from previous location where exception was thrown ---
   at System.Runtime.ExceptionServices.ExceptionDispatchInfo.Throw()
   at System.Runtime.CompilerServices.TaskAwaiter.HandleNonSuccessAndDebuggerNotification(Task task)
   at Odin.Compute.OdinDatabaseBackupManager.Program.<ChunkUpload>d__8.MoveNext()
--- End of stack trace from previous location where exception was thrown ---
   at System.Runtime.ExceptionServices.ExceptionDispatchInfo.Throw()
   at System.Runtime.CompilerServices.TaskAwaiter.HandleNonSuccessAndDebuggerNotification(Task task)
   at Odin.Compute.OdinDatabaseBackupManager.Program.<Upload>d__7.MoveNext()
--- End of stack trace from previous location where exception was thrown ---
   at System.Runtime.ExceptionServices.ExceptionDispatchInfo.Throw()
   at System.Runtime.CompilerServices.TaskAwaiter.HandleNonSuccessAndDebuggerNotification(Task task)
   at Odin.Compute.OdinDatabaseBackupManager.Program.<Run>d__5.MoveNext(); Request Id: 18b89669a35b4234bb4592f84d61c048


To test this I came up the code that I shared before. Can you share me any code for reference it will be helpful.

Здравко
Legendary | Level 20
Go to solution

Hm..🤔 I thought I was clear, but it seems not enough.


@Engg wrote:

...
List<Task> tasks = new List<Task>();

foreach (var item in dbLocalFilesPath.Select((value, i) => new { i, value }))
{

string dbLocalPath = item.value;
int index = item.i;
Logger.Instance.LogInfo($"dbLocalPath: {dbLocalPath}");
tasks.Add(Task.Run(async () =>

...


As can be seen here you are starting multiple uploads at the same time (all files at the particular machine). Right? All right up to here. Let see what's next:


@Engg wrote:

...
FileMetadata fileMetadata = await files.UploadAsync(path, clientModified: clientModified, body: body);

...
await client.Files.UploadSessionFinishAsync(cursor: cursor, commit: new CommitInfo(remotePath), body: memStream);
...


In all cases, for any file size, you are using uploads that directly try modify Dropbox account folder tree!!! Dropbox doesn't let this to happen! Every change in the folder tree is strictly serialized and if many waiting query appear, some of them receive rate limit error (read this limit as a limiting change rate, not exactly the upload rate). That's where your issue is coming from. Is it more clear now? What if you upload using sessions regardless file size and instead of using finish at the every file end (something that triggers the change) you use batch version of the finish (just one change - here I assume you have no more than 1000 files per machine that need upload)? 🧐 Where will limiting error happen almost sure and where wouldn't? 😉

 

PS: In addition you can en-schedule your machines uploads in a slightly different time - let's say 10 seconds time steps one after other.

Engg
Explorer | Level 3
Go to solution

One machine will have only one file which may be more than 1 GB. Only issue is all the 500 machine is getting uploaded at same time and each machine will try to modify Dropbox account folder tree. In this case when I should do UploadSessionFinishAsync.
Any way to handle this?



Здравко
Legendary | Level 20
Go to solution

@Engg wrote:

One machine will have only one file which may be more than 1 GB. ...


If per machine is a single file only to upload, you don't need loop. Why at all are you calling multiple parallel uploads in such a case? File size doesn't matter when upload and finishing are separate. Upload itself is not limited as I said already. Just when call UploadSessionFinishAsync try reorder your code so everything is already uploaded - no content in its POST query - faster processing.

 


@Engg wrote:

... Only issue is all the 500 machine is getting uploaded at same time and each machine will try to modify Dropbox account folder tree. ...


Hm..🤔 There is no a single way to handle this (neither best). A step in this direction, as I said, is to reschedule upload so the they won't overlap (so less likely to compete to each other). You should continue handle the rate limiting error, but don't need to re-upload the file anew. Once the file is uploaded, you can try finish it as long and many times as needed (i.e. repeat finishing only) in a loop with proper backoff.

Good luck.

Engg
Explorer | Level 3
Go to solution

If per machine is a single file only to upload, you don't need loop. Why at all are you calling multiple parallel uploads in such a case?

Just for the testing I created a console app. To replicate the 500 machine test

You should continue handle the rate limiting error, but don't need to re-upload the file anew. 

What you mean by this? did you have any code for this?

Здравко
Legendary | Level 20
Go to solution

To be honest I'm not sure what confuses you. You perform already upload finishing. Right? If you don't need to add some content there, the only needed is session identifier (that represents the uploaded file but not finished yet). If you don't finish the uploaded file successfully in first try, you can try next time (after proper backoff that should grow up continuously and it's a good idea to add some random component) - the session id stays valid for week.

Engg
Explorer | Level 3
Go to solution

Do you have any code how to retry if there is any failure. Because in all the Call we need Stream how to get the remaining Stream without uploading it again.

Здравко
Legendary | Level 20
Go to solution

@Engg wrote:

... Because in all the Call we need Stream how to get the remaining Stream without uploading it again.


Hm..🤔🤷 What kind of "remaining Stream" do you need at all and why? Honestly, I cannot understand you.

Probably you don't understand how Dropbox upload works in general and that's what confuses you. In all cases (either using explicitly upload session or using simplified upload) the actual upload is performed as from your stream (either provided in one step for small files or in many steps for large files) to a POST HTTP request body gets formed, which fills (at once or step by step) the upload session body (residing on Dropbox server and represented with session id, that you may have or not). Once upload session gets filled upload may be finished (gets associated with file name and path) to users account. 😉 That's it. This flow happens even when you just use simple upload - then it just happens in one call and you don't have access to session id (you don't need it).

No one file can be uploaded directly to particular place in users account (even though such your use of 'UploadAsync' builds such an illusion)! Upload always happens to something like temporary file - upload session! At the end, this session' content can be copied to desired place in users account - upload finishing. About expected streams - all this is option and such a stream always can be empty.

Some example, showing all explained, follows:

private readonly long CHUNK_SIZE = 4 * 1024 * 1024;

private async Task UploadSessionAppenV2Async(DropboxClient dbx, UploadSessionCursor cursor, byte[] buffer, int bytesRead, bool close = false)
{
    try
    {
        var memStream = new MemoryStream(buffer, 0, bytesRead);
        await dbx.Files.UploadSessionAppendV2Async(cursor, close, null, memStream);
    }
    catch(Exception ex)
    {
        Console.WriteLine("Error while append at offset {0}: {1}", cursor.Offset, ex);
    }
}

async Task TestBigUpload(DropboxClient dbx, string filePath)
{
    Console.WriteLine("Започва сесия за качване на {0} като {1}", filePath, filePath.Substring(filePath.LastIndexOf('/')));
    var fileStream = File.OpenRead(filePath);
    Console.WriteLine("Размерът на файла е {0} байта.", fileStream.Length);
    var numChunks = (int)Math.Ceiling(((double)fileStream.Length) / CHUNK_SIZE);
    Console.WriteLine("Ще се качва на {0} части.", numChunks);
    var buffer = new byte[CHUNK_SIZE];
    var memStream = new MemoryStream(buffer, 0, 0);
    var sessionId = (await dbx.Files.UploadSessionStartAsync(body: memStream)).SessionId;
    Console.WriteLine("Отвори се сессия с идентификатор: {0}", sessionId);
    for (var i = 0; i < numChunks - 1; ++i)
    {
        var bytesRead = fileStream.Read(buffer, 0, (int)CHUNK_SIZE);
        var cursor = new UploadSessionCursor(sessionId, (ulong)(i * CHUNK_SIZE));
        Console.WriteLine("Качва се блок с размер {0} на позиция {1}", bytesRead, i * CHUNK_SIZE);
        await UploadSessionAppenV2Async(dbx, cursor, buffer, bytesRead);
    }
    var bytesReaded = fileStream.Read(buffer, 0, (int)CHUNK_SIZE);
    Console.WriteLine("Последият прочетен блок е с размер {0}", bytesReaded);
    var lastCursor = new UploadSessionCursor(sessionId, (ulong)((numChunks - 1) * CHUNK_SIZE));
    await UploadSessionAppenV2Async(dbx, lastCursor, buffer, bytesReaded, true);
    lastCursor = new UploadSessionCursor(sessionId, (ulong)fileStream.Length);
    var commitInfo = new CommitInfo(filePath.Substring(filePath.LastIndexOf('/')), WriteMode.Overwrite.Instance, false);
    memStream = new MemoryStream(buffer, 0, 0);
    await dbx.Files.UploadSessionFinishAsync(lastCursor, commitInfo, body: memStream);
    Console.WriteLine("Качването на {0} приключи.", filePath.Substring(filePath.LastIndexOf('/')));
    Console.WriteLine("Започва приключване и на неговото копие {0} \ud83d\ude09...", "/testSubfolder" + filePath.Substring(filePath.LastIndexOf('/')));
    lastCursor = new UploadSessionCursor(sessionId, (ulong)fileStream.Length);
    commitInfo = new CommitInfo("/testSubfolder" + filePath.Substring(filePath.LastIndexOf('/')), WriteMode.Overwrite.Instance, false);
    memStream = new MemoryStream(buffer, 0, 0);
    await dbx.Files.UploadSessionFinishAsync(lastCursor, commitInfo, body: memStream);
}

This code can upload file of any size. In some places empty streams are used. Just call 'TestBigUpload' from your code to see the result. The temporary file will appear as 2 copies - showing how the session can be used later again without real stream (including in exception handler - not used here). 🙂

Hope this sheds some more light.

 

PS: The code is just example and is far from optimal - implement it in your code better.

Need more support?
Who's talking

Top contributors to this post

  • User avatar
    Здравко Legendary | Level 20
  • User avatar
    Engg Explorer | Level 3
What do Dropbox user levels mean?