forked from organicmaps/organicmaps
[downloader] Added resume support for chunked downloads
commit 2dc0ca689c
parent 710aa71023
4 changed files with 84 additions and 14 deletions
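The diff below keeps the set of chunk ranges that still need to be fetched and writes it to a "<file>.resume" sidecar, so an interrupted download can continue where it stopped. As a rough standalone sketch of that bookkeeping (plain std:: streams are used here purely for illustration; the commit itself serializes the set with the project's FileWriterStream/FileReaderStream and std_serialization helpers, and every name in this sketch is hypothetical):

#include <algorithm>
#include <cstdint>
#include <fstream>
#include <set>
#include <string>
#include <utility>

typedef std::pair<int64_t, int64_t> RangeT;
typedef std::set<RangeT> RangesContainerT;

// Split [0, fileSize) into inclusive byte ranges of at most chunkSize bytes each.
RangesContainerT MakeChunks(int64_t fileSize, int64_t chunkSize)
{
  RangesContainerT chunks;
  for (int64_t i = 0; i < fileSize; i += chunkSize)
    chunks.insert(RangeT(i, std::min(i + chunkSize - 1, fileSize - 1)));
  return chunks;
}

// Persist the ranges that still have to be downloaded (one "beg end" pair per line).
void SaveResume(std::string const & file, RangesContainerT const & left)
{
  std::ofstream out(file.c_str());
  for (RangesContainerT::const_iterator it = left.begin(); it != left.end(); ++it)
    out << it->first << ' ' << it->second << '\n';
}

// Returns true if a resume file was found and contained at least one range.
bool LoadResume(std::string const & file, RangesContainerT & left)
{
  left.clear();
  std::ifstream in(file.c_str());
  int64_t beg, end;
  while (in >> beg >> end)
    left.insert(RangeT(beg, end));
  return !left.empty();
}

On the next run, whatever LoadResume returns would be handed back to the strategy (the commit does this via ChunksDownloadStrategy::SetChunksToDownload), and progress is recomputed as fileSize minus the bytes still covered by the loaded ranges.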
@@ -10,7 +10,8 @@ namespace downloader
 ChunksDownloadStrategy::RangeT const ChunksDownloadStrategy::INVALID_RANGE = RangeT(-1, -1);
 
-ChunksDownloadStrategy::ChunksDownloadStrategy(vector<string> const & urls, int64_t fileSize, int64_t chunkSize)
+ChunksDownloadStrategy::ChunksDownloadStrategy(vector<string> const & urls, int64_t fileSize,
+                                               int64_t chunkSize)
   : m_chunkSize(chunkSize)
 {
   // init servers list
@@ -18,12 +19,13 @@ ChunksDownloadStrategy::ChunksDownloadStrategy(vector<string> const & urls, int6
     m_servers.push_back(make_pair(urls[i], INVALID_RANGE));
 
   // init chunks which should be downloaded
-  // @TODO implement download resume by saving chunks to download for specified file
   for (int64_t i = 0; i < fileSize; i += chunkSize)
-  {
-    m_chunksToDownload.insert(RangeT(i, min(i + chunkSize - 1,
-                                            fileSize - 1)));
-  }
+    m_chunksToDownload.insert(RangeT(i, min(i + chunkSize - 1, fileSize - 1)));
 }
 
+void ChunksDownloadStrategy::SetChunksToDownload(RangesContainerT & chunks)
+{
+  m_chunksToDownload.swap(chunks);
+}
+
 void ChunksDownloadStrategy::ChunkFinished(bool successfully, int64_t begRange, int64_t endRange)

@@ -10,16 +10,20 @@ namespace downloader
 class ChunksDownloadStrategy
 {
-  int64_t m_chunkSize;
 
+public:
   typedef pair<int64_t, int64_t> RangeT;
+  typedef set<RangeT> RangesContainerT;
 
+private:
+  int64_t m_chunkSize;
   static RangeT const INVALID_RANGE;
   /// <server url, currently downloading range or INVALID_RANGE if url is not used>
   typedef vector<pair<string, RangeT> > ServersT;
   ServersT m_servers;
-  set<RangeT> m_chunksToDownload;
+  RangesContainerT m_chunksToDownload;
 
 public:
+  /// @param[in] chunksToDownload used for resume
   ChunksDownloadStrategy(vector<string> const & urls, int64_t fileSize, int64_t chunkSize = 512 * 1024);
 
   int64_t ChunkSize() const { return m_chunkSize; }
@@ -34,6 +38,9 @@ public:
   };
   /// Should be called until returns ENextChunk
   ResultT NextChunk(string & outUrl, int64_t & begRange, int64_t & endRange);
+
+  void SetChunksToDownload(RangesContainerT & chunks);
+  RangesContainerT const & ChunksLeft() const { return m_chunksToDownload; }
 };
 
 } // namespace downloader

@@ -6,7 +6,10 @@
 #include "../base/thread.hpp"
 #endif
 
 #include "../coding/file_writer.hpp"
+#include "../base/std_serialization.hpp"
 
+#include "../coding/file_writer_stream.hpp"
+#include "../coding/file_reader_stream.hpp"
 
 #include "../std/scoped_ptr.hpp"

@@ -102,6 +105,9 @@ class FileHttpRequest : public HttpRequest, public IHttpThreadCallback
   string m_filePath;
   scoped_ptr<FileWriter> m_writer;
 
+  /// Used to save not downloaded chunks for later resume not so often
+  size_t m_goodChunksCount;
+
   ChunksDownloadStrategy::ResultT StartThreads()
   {
     string url;
@@ -150,7 +156,7 @@ class FileHttpRequest : public HttpRequest, public IHttpThreadCallback
     if (result == ChunksDownloadStrategy::EDownloadSucceeded
         || result == ChunksDownloadStrategy::ENoFreeServers)
     {
-      m_progress.first += m_strategy.ChunkSize();
+      m_progress.first += (endRange - begRange);
       if (m_onProgress)
         m_onProgress(*this);
     }
@@ -158,7 +164,17 @@ class FileHttpRequest : public HttpRequest, public IHttpThreadCallback
     if (result == ChunksDownloadStrategy::EDownloadFailed)
       m_status = EFailed;
     else if (result == ChunksDownloadStrategy::EDownloadSucceeded)
+    {
       m_status = ECompleted;
+      ++m_goodChunksCount;
+      if (m_strategy.ChunksLeft().empty())
+        FileWriter::DeleteFileX(m_filePath + ".resume");
+      else
+      {
+        if (m_goodChunksCount % 10 == 0)
+          SaveRanges(m_filePath + ".resume", m_strategy.ChunksLeft());
+      }
+    }
 
     if (m_status != EInProgress)
     {
@@ -167,15 +183,60 @@ class FileHttpRequest : public HttpRequest, public IHttpThreadCallback
     }
   }
 
+  /// @return true if ranges are present and loaded
+  static bool LoadRanges(string const & file, ChunksDownloadStrategy::RangesContainerT & ranges)
+  {
+    ranges.clear();
+    try
+    {
+      FileReaderStream frs(file);
+      frs >> ranges;
+    }
+    catch (std::exception const &)
+    {
+      return false;
+    }
+    return !ranges.empty();
+  }
+
+  static void SaveRanges(string const & file, ChunksDownloadStrategy::RangesContainerT const & ranges)
+  {
+    FileWriterStream fws(file);
+    fws << ranges;
+  }
+
+  struct CalcRanges
+  {
+    int64_t & m_summ;
+    CalcRanges(int64_t & summ) : m_summ(summ) {}
+    void operator()(ChunksDownloadStrategy::RangeT const & range)
+    {
+      m_summ += (range.second - range.first);
+    }
+  };
+
 public:
   FileHttpRequest(vector<string> const & urls, string const & filePath, int64_t fileSize,
                   CallbackT onFinish, CallbackT onProgress, int64_t chunkSize)
     : HttpRequest(onFinish, onProgress), m_strategy(urls, fileSize, chunkSize),
-      m_filePath(filePath), m_writer(new FileWriter(filePath, FileWriter::OP_WRITE_EXISTING))
+      m_filePath(filePath), m_writer(new FileWriter(filePath, FileWriter::OP_WRITE_EXISTING)),
+      m_goodChunksCount(0)
   {
     ASSERT_GREATER(fileSize, 0, ("At the moment only known file sizes are supported"));
     ASSERT(!urls.empty(), ("Urls list shouldn't be empty"));
     // store expected file size for future checks
     m_progress.second = fileSize;
 
+    // Resume support - load chunks which should be downloaded (if they're present)
+    ChunksDownloadStrategy::RangesContainerT ranges;
+    if (LoadRanges(filePath + ".resume", ranges))
+    {
+      // fix progress
+      int64_t sizeLeft = 0;
+      for_each(ranges.begin(), ranges.end(), CalcRanges(sizeLeft));
+      m_progress.first = fileSize - sizeLeft;
+      m_strategy.SetChunksToDownload(ranges);
+    }
+
     StartThreads();
   }

@@ -416,7 +416,7 @@ struct ResumeChecker
   {
     if (m_counter == 0)
     {
-      TEST_EQUAL(request.Progress(), make_pair(FILESIZE - beg2, FILESIZE), ());
+      TEST_EQUAL(request.Progress(), make_pair(beg2 + 1, FILESIZE), ());
     }
     else if (m_counter == 1)
    {
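A note on the updated expectation in the test hunk above: CalcRanges in this commit sums range.second - range.first (without the usual +1), so if the single range left in the resume file were [beg2, FILESIZE - 1], the constructor would report beg2 + 1 bytes as already downloaded. A tiny check of that arithmetic, where FILESIZE, beg2 and the leftover range are made-up stand-ins for whatever the test actually uses:

#include <cassert>
#include <cstdint>

int main()
{
  // Hypothetical values standing in for the test's FILESIZE and beg2.
  int64_t const FILESIZE = 5 * 1024 * 1024;
  int64_t const beg2 = 4 * 1024 * 1024;

  // CalcRanges adds (second - first) for each remaining range; here the only
  // assumed leftover range is [beg2, FILESIZE - 1].
  int64_t const sizeLeft = (FILESIZE - 1) - beg2;

  // FileHttpRequest then reports fileSize - sizeLeft as already downloaded,
  // which is exactly the beg2 + 1 the updated TEST_EQUAL expects.
  int64_t const progressFirst = FILESIZE - sizeLeft;
  assert(progressFirst == beg2 + 1);
  return 0;
}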