forked from organicmaps/organicmaps
added a flag for not cleaning the resume and partially downloaded files upon destruction of HttpRequest.
parent 004567c59b
commit 48b7040b8e
2 changed files with 14 additions and 8 deletions
@@ -106,6 +106,7 @@ class FileHttpRequest : public HttpRequest, public IHttpThreadCallback
   scoped_ptr<FileWriter> m_writer;
 
   size_t m_goodChunksCount;
+  bool m_doCleanProgressFiles;
 
   ChunksDownloadStrategy::ResultT StartThreads()
   {
@@ -239,11 +240,12 @@ class FileHttpRequest : public HttpRequest, public IHttpThreadCallback
 
 public:
   FileHttpRequest(vector<string> const & urls, string const & filePath, int64_t fileSize,
-                  CallbackT onFinish, CallbackT onProgress, int64_t chunkSize)
+                  CallbackT onFinish, CallbackT onProgress, int64_t chunkSize, bool doCleanProgressFiles)
     : HttpRequest(onFinish, onProgress), m_strategy(urls, fileSize, chunkSize),
       m_filePath(filePath),
       m_writer(new FileWriter(filePath + DOWNLOADING_FILE_EXTENSION, FileWriter::OP_WRITE_EXISTING)),
-      m_goodChunksCount(0)
+      m_goodChunksCount(0),
+      m_doCleanProgressFiles(doCleanProgressFiles)
   {
     ASSERT_GREATER(fileSize, 0, ("At the moment only known file sizes are supported"));
     ASSERT(!urls.empty(), ("Urls list shouldn't be empty"));
@@ -273,11 +275,14 @@ public:
       DeleteNativeHttpThread(it->first);
 
     if (m_status == EInProgress)
-    { // means that client canceled donwload process
+    { // means that client canceled download process
       // so delete all temporary files
       m_writer.reset();
-      FileWriter::DeleteFileX(m_filePath + DOWNLOADING_FILE_EXTENSION);
-      FileWriter::DeleteFileX(m_filePath + RESUME_FILE_EXTENSION);
+      if (m_doCleanProgressFiles)
+      {
+        FileWriter::DeleteFileX(m_filePath + DOWNLOADING_FILE_EXTENSION);
+        FileWriter::DeleteFileX(m_filePath + RESUME_FILE_EXTENSION);
+      }
     }
   }
 
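Not part of the diff: the hunk above is the behavioral core of the commit. Destroying a FileHttpRequest while it is still EInProgress means the client cancelled, and the destructor previously always deleted both temporary files; the new flag now gates that cleanup so the partial data can survive for a later attempt. A minimal standalone sketch of that decision, assuming DOWNLOADING_FILE_EXTENSION and RESUME_FILE_EXTENSION are suffixes appended to the target path (their values are defined elsewhere in the downloader, not shown in this diff); the function name is hypothetical:

// Sketch only (not from the commit): the cancel-path cleanup in isolation.
// Keeping the files should let a later request for the same filePath pick up
// the partial data, since the writer opens it with FileWriter::OP_WRITE_EXISTING.
void CleanupProgressFiles(string const & filePath, bool doCleanProgressFiles)
{
  if (!doCleanProgressFiles)
    return;  // preserve partial download and resume info for a future attempt
  FileWriter::DeleteFileX(filePath + DOWNLOADING_FILE_EXTENSION);
  FileWriter::DeleteFileX(filePath + RESUME_FILE_EXTENSION);
}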
@@ -310,9 +315,9 @@ HttpRequest * HttpRequest::PostJson(string const & url, string const & postData,
 }
 
 HttpRequest * HttpRequest::GetFile(vector<string> const & urls, string const & filePath, int64_t fileSize,
-                                   CallbackT onFinish, CallbackT onProgress, int64_t chunkSize)
+                                   CallbackT onFinish, CallbackT onProgress, int64_t chunkSize, bool doCleanProgressFiles)
 {
-  return new FileHttpRequest(urls, filePath, fileSize, onFinish, onProgress, chunkSize);
+  return new FileHttpRequest(urls, filePath, fileSize, onFinish, onProgress, chunkSize, doCleanProgressFiles);
 }
 
 bool ParseServerList(string const & jsonStr, vector<string> & outUrls)
@@ -49,7 +49,8 @@ public:
   static HttpRequest * GetFile(vector<string> const & urls, string const & filePath,
                                int64_t projectedFileSize,
                                CallbackT onFinish, CallbackT onProgress = CallbackT(),
-                               int64_t chunkSize = 512 * 1024);
+                               int64_t chunkSize = 512 * 1024,
+                               bool doCleanProgressFiles = true);
 };
 
 bool ParseServerList(string const & jsonStr, vector<string> & outUrls);
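For orientation (not part of the commit): a hedged caller-side sketch of the new parameter, using only the GetFile signature shown in the header hunk above. The URL, path, size, and callback names are illustrative placeholders, not values from the repository; the default of true preserves the old always-clean behavior for existing callers.

// Hypothetical usage: start a chunked download, but keep the progress files
// (the partial-data file and the resume file) if this request is destroyed
// before finishing, so a later call could continue the download.
vector<string> urls;
urls.push_back("http://example.com/Country.mwm");  // placeholder URL

HttpRequest * req = HttpRequest::GetFile(
    urls,
    "/tmp/Country.mwm",   // placeholder final path; progress files derive from it
    123456789,            // projected file size; must be > 0 (see ASSERT_GREATER)
    onFinish,             // CallbackT, placeholder completion callback
    onProgress,           // CallbackT, placeholder progress callback
    512 * 1024,           // chunk size (the header's default)
    false);               // doCleanProgressFiles = false: keep partial files

// Deleting the request while its status is still EInProgress counts as a
// cancel; with the flag set to false, the temporary files now survive it.
delete req;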
|
Loading…
Add table
Reference in a new issue