ICU-13399 Fix thread race in the Unified Cache.

X-SVN-Rev: 40994
Andy Heninger 2018-02-27 19:27:42 +00:00
parent 216c856d80
commit 887071e232
6 changed files with 421 additions and 376 deletions

View file

@ -8,7 +8,10 @@
* sharedobject.cpp
*/
#include "sharedobject.h"
#include "mutex.h"
#include "uassert.h"
#include "umutex.h"
#include "unifiedcache.h"
U_NAMESPACE_BEGIN
@ -17,69 +20,41 @@ SharedObject::~SharedObject() {}
UnifiedCacheBase::~UnifiedCacheBase() {}
void
SharedObject::addRef(UBool fromWithinCache) const {
umtx_atomic_inc(&totalRefCount);
// Although items in use may not be correct immediately, it
// will be correct eventually.
if (umtx_atomic_inc(&hardRefCount) == 1 && cachePtr != NULL) {
// If this object is cached, and the hardRefCount goes from 0 to 1,
// then the increment must happen from within the cache while the
// cache global mutex is locked. In this way, we can be rest assured
// that data races can't happen if the cache performs some task if
// the hardRefCount is zero while the global cache mutex is locked.
(void)fromWithinCache; // Suppress unused variable warning in non-debug builds.
U_ASSERT(fromWithinCache);
cachePtr->incrementItemsInUse();
}
SharedObject::addRef() const {
umtx_atomic_inc(&hardRefCount);
}
// removeRef Decrement the reference count and delete if it is zero.
// Note that SharedObjects with a non-null cachePtr are owned by the
// unified cache, and the cache will be responsible for the actual deletion.
// The deletion could be as soon as immediately following the
// update to the reference count, if another thread is running
// a cache eviction cycle concurrently.
// NO ACCESS TO *this PERMITTED AFTER REFERENCE COUNT == 0 for cached objects.
// THE OBJECT MAY ALREADY BE GONE.
void
SharedObject::removeRef(UBool fromWithinCache) const {
UBool decrementItemsInUse = (umtx_atomic_dec(&hardRefCount) == 0);
UBool allReferencesGone = (umtx_atomic_dec(&totalRefCount) == 0);
// Although items in use may not be correct immediately, it
// will be correct eventually.
if (decrementItemsInUse && cachePtr != NULL) {
if (fromWithinCache) {
cachePtr->decrementItemsInUse();
SharedObject::removeRef() const {
const UnifiedCacheBase *cache = this->cachePtr;
int32_t updatedRefCount = umtx_atomic_dec(&hardRefCount);
U_ASSERT(updatedRefCount >= 0);
if (updatedRefCount == 0) {
if (cache) {
cache->handleUnreferencedObject();
} else {
cachePtr->decrementItemsInUseWithLockingAndEviction();
delete this;
}
}
if (allReferencesGone) {
delete this;
}
}
void
SharedObject::addSoftRef() const {
umtx_atomic_inc(&totalRefCount);
++softRefCount;
}
void
SharedObject::removeSoftRef() const {
--softRefCount;
if (umtx_atomic_dec(&totalRefCount) == 0) {
delete this;
}
}
int32_t
SharedObject::getRefCount() const {
return umtx_loadAcquire(totalRefCount);
}
int32_t
SharedObject::getHardRefCount() const {
return umtx_loadAcquire(hardRefCount);
}
void
SharedObject::deleteIfZeroRefCount() const {
if(getRefCount() == 0) {
if (this->cachePtr == nullptr && getRefCount() == 0) {
delete this;
}
}
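// Editor's illustration (not part of this commit): with the simplified counting
// above, a SharedObject that is not owned by the UnifiedCache (cachePtr == NULL)
// is deleted by the last removeRef(). The names MyValue and
// referenceCountingSketch are hypothetical; assumes "sharedobject.h" is included.
class MyValue : public icu::SharedObject {
public:
    virtual ~MyValue() {}
};

void referenceCountingSketch() {
    const MyValue *v = new MyValue();
    v->addRef();                        // hardRefCount: 0 -> 1
    // ... hand v to other code; every holder pairs addRef() with removeRef() ...
    v->removeRef();                     // hardRefCount: 1 -> 0; cachePtr is NULL, so removeRef() deletes v

    const MyValue *unused = new MyValue();
    unused->deleteIfZeroRefCount();     // never referenced and not cached: deleted immediately
}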

View file

@ -17,6 +17,8 @@
U_NAMESPACE_BEGIN
class SharedObject;
/**
* Base class for unified cache exposing enough methods to SharedObject
* instances to allow their addRef() and removeRef() methods to
@ -28,22 +30,12 @@ public:
UnifiedCacheBase() { }
/**
* Called by addRefWhileHoldingCacheLock() when the hard reference count
* of its instance goes from 0 to 1.
* Notify the cache implementation that an object was seen transitioning to
* zero hard references. The cache may use this to keep track the number of
* unreferenced SharedObjects, and to trigger evictions.
*/
virtual void incrementItemsInUse() const = 0;
virtual void handleUnreferencedObject() const = 0;
/**
* Called by removeRef() when the hard reference count of its instance
* drops from 1 to 0.
*/
virtual void decrementItemsInUseWithLockingAndEviction() const = 0;
/**
* Called by removeRefWhileHoldingCacheLock() when the hard reference
* count of its instance drops from 1 to 0.
*/
virtual void decrementItemsInUse() const = 0;
virtual ~UnifiedCacheBase();
private:
UnifiedCacheBase(const UnifiedCacheBase &);
@ -63,7 +55,6 @@ class U_COMMON_API SharedObject : public UObject {
public:
/** Initializes totalRefCount, softRefCount to 0. */
SharedObject() :
totalRefCount(0),
softRefCount(0),
hardRefCount(0),
cachePtr(NULL) {}
@ -71,7 +62,6 @@ public:
/** Initializes totalRefCount, softRefCount to 0. */
SharedObject(const SharedObject &other) :
UObject(other),
totalRefCount(0),
softRefCount(0),
hardRefCount(0),
cachePtr(NULL) {}
@ -79,93 +69,45 @@ public:
virtual ~SharedObject();
/**
* Increments the number of references to this object. Thread-safe.
* Increments the number of hard references to this object. Thread-safe.
* Not for use from within the Unified Cache implementation.
*/
void addRef() const { addRef(FALSE); }
void addRef() const;
/**
* Increments the number of references to this object.
* Must be called only from within the internals of UnifiedCache and
* only while the cache global mutex is held.
* Decrements the number of hard references to this object, and
* arranges for possible cache eviction and/or deletion if the
* reference count goes to zero. Thread-safe.
*
* Not for use from within the UnifiedCache implementation.
*/
void addRefWhileHoldingCacheLock() const { addRef(TRUE); }
void removeRef() const;
/**
* Increments the number of soft references to this object.
* Must be called only from within the internals of UnifiedCache and
* only while the cache global mutex is held.
*/
void addSoftRef() const;
/**
* Decrements the number of references to this object. Thread-safe.
*/
void removeRef() const { removeRef(FALSE); }
/**
* Decrements the number of references to this object.
* Must be called only from within the internals of UnifiedCache and
* only while the cache global mutex is held.
*/
void removeRefWhileHoldingCacheLock() const { removeRef(TRUE); }
/**
* Decrements the number of soft references to this object.
* Must be called only from within the internals of UnifiedCache and
* only while the cache global mutex is held.
*/
void removeSoftRef() const;
/**
* Returns the reference counter including soft references.
* Returns the number of hard references for this object.
* Uses a memory barrier.
*/
int32_t getRefCount() const;
/**
* Returns the count of soft references only.
* Must be called only from within the internals of UnifiedCache and
* only while the cache global mutex is held.
*/
int32_t getSoftRefCount() const { return softRefCount; }
/**
* Returns the count of hard references only. Uses a memory barrier.
* Used for testing the cache. Regular clients won't need this.
*/
int32_t getHardRefCount() const;
/**
* If noHardReferences() == TRUE then this object has no hard references.
* Must be called only from within the internals of UnifiedCache.
*/
inline UBool noHardReferences() const { return getHardRefCount() == 0; }
inline UBool noHardReferences() const { return getRefCount() == 0; }
/**
* If hasHardReferences() == TRUE then this object has hard references.
* Must be called only from within the internals of UnifiedCache.
*/
inline UBool hasHardReferences() const { return getHardRefCount() != 0; }
inline UBool hasHardReferences() const { return getRefCount() != 0; }
/**
* If noSoftReferences() == TRUE then this object has no soft references.
* Must be called only from within the internals of UnifiedCache and
* only while the cache global mutex is held.
*/
UBool noSoftReferences() const { return (softRefCount == 0); }
/**
* Deletes this object if it has no references or soft references.
* Deletes this object if it has no references.
* Available for non-cached SharedObjects only. Ownership of cached objects
* is with the UnifiedCache, which is solely responsible for eviction and deletion.
*/
void deleteIfZeroRefCount() const;
/**
* @internal For UnifedCache use only to register this object with itself.
* Must be called before this object is exposed to multiple threads.
*/
void registerWithCache(const UnifiedCacheBase *ptr) const {
cachePtr = ptr;
}
/**
* Returns a writable version of ptr.
@ -219,15 +161,21 @@ public:
}
private:
mutable u_atomic_int32_t totalRefCount;
// Any thread modifying softRefCount must hold the global cache mutex
/**
* The number of references from the UnifiedCache, which is
* the number of times that the SharedObject is stored as a hash table value.
* For use by UnifiedCache implementation code only.
* All access is synchronized by UnifiedCache's gCacheMutex.
*/
mutable int32_t softRefCount;
friend class UnifiedCache;
/**
* Reference count, excluding references from within the UnifiedCache implementation.
*/
mutable u_atomic_int32_t hardRefCount;
mutable const UnifiedCacheBase *cachePtr;
void addRef(UBool withCacheLock) const;
void removeRef(UBool withCacheLock) const;
};
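// Editor's illustration (not part of this commit): SharedObject::clearPtr(),
// used by the unit tests further down, releases a hard reference and nulls the
// pointer. With the WhileHoldingCacheLock variants gone, its shape is roughly
// the hypothetical sketch below:
template<typename T>
void clearPtrSketch(const T *&ptr) {
    if (ptr != NULL) {
        ptr->removeRef();   // plain removeRef(); the cache now does its own locking
        ptr = NULL;
    }
}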

View file

@ -2,28 +2,30 @@
// License & terms of use: http://www.unicode.org/copyright.html
/*
******************************************************************************
* Copyright (C) 2015, International Business Machines Corporation and
* others. All Rights Reserved.
* Copyright (C) 2015, International Business Machines Corporation and
* others. All Rights Reserved.
******************************************************************************
*
* File UNIFIEDCACHE.CPP
*
* File unifiedcache.cpp
******************************************************************************
*/
#include "uhash.h"
#include "unifiedcache.h"
#include "umutex.h"
#include <algorithm> // For std::max()
#include "mutex.h"
#include "uassert.h"
#include "uhash.h"
#include "ucln_cmn.h"
#include "umutex.h"
static icu::UnifiedCache *gCache = NULL;
static icu::SharedObject *gNoValue = NULL;
static UMutex gCacheMutex = U_MUTEX_INITIALIZER;
static UConditionVar gInProgressValueAddedCond = U_CONDITION_INITIALIZER;
static icu::UInitOnce gCacheInitOnce = U_INITONCE_INITIALIZER;
static const int32_t MAX_EVICT_ITERATIONS = 10;
static const int32_t MAX_EVICT_ITERATIONS = 10;
static const int32_t DEFAULT_MAX_UNUSED = 1000;
static const int32_t DEFAULT_PERCENTAGE_OF_IN_USE = 100;
@ -35,10 +37,6 @@ static UBool U_CALLCONV unifiedcache_cleanup() {
delete gCache;
gCache = NULL;
}
if (gNoValue) {
delete gNoValue;
gNoValue = NULL;
}
return TRUE;
}
U_CDECL_END
@ -73,23 +71,15 @@ static void U_CALLCONV cacheInit(UErrorCode &status) {
ucln_common_registerCleanup(
UCLN_COMMON_UNIFIED_CACHE, unifiedcache_cleanup);
// gNoValue must be created first to avoid assertion error in
// cache constructor.
gNoValue = new SharedObject();
gCache = new UnifiedCache(status);
if (gCache == NULL) {
status = U_MEMORY_ALLOCATION_ERROR;
}
if (U_FAILURE(status)) {
delete gCache;
delete gNoValue;
gCache = NULL;
gNoValue = NULL;
return;
}
// We add a softref because we want hash elements with gNoValue to be
// elligible for purging but we don't ever want gNoValue to be deleted.
gNoValue->addSoftRef();
}
UnifiedCache *UnifiedCache::getInstance(UErrorCode &status) {
@ -104,14 +94,24 @@ UnifiedCache *UnifiedCache::getInstance(UErrorCode &status) {
UnifiedCache::UnifiedCache(UErrorCode &status) :
fHashtable(NULL),
fEvictPos(UHASH_FIRST),
fItemsInUseCount(0),
fNumValuesTotal(0),
fNumValuesInUse(0),
fMaxUnused(DEFAULT_MAX_UNUSED),
fMaxPercentageOfInUse(DEFAULT_PERCENTAGE_OF_IN_USE),
fAutoEvictedCount(0) {
fAutoEvictedCount(0),
fNoValue(nullptr) {
if (U_FAILURE(status)) {
return;
}
U_ASSERT(gNoValue != NULL);
fNoValue = new SharedObject();
if (fNoValue == nullptr) {
status = U_MEMORY_ALLOCATION_ERROR;
return;
}
fNoValue->softRefCount = 1; // Add fake references to prevent fNoValue from being deleted
fNoValue->hardRefCount = 1; // when other references to it are removed.
fNoValue->cachePtr = this;
fHashtable = uhash_open(
&ucache_hashKeys,
&ucache_compareKeys,
@ -139,7 +139,7 @@ void UnifiedCache::setEvictionPolicy(
int32_t UnifiedCache::unusedCount() const {
Mutex lock(&gCacheMutex);
return uhash_count(fHashtable) - fItemsInUseCount;
return uhash_count(fHashtable) - fNumValuesInUse;
}
int64_t UnifiedCache::autoEvictedCount() const {
@ -161,6 +161,12 @@ void UnifiedCache::flush() const {
while (_flush(FALSE));
}
void UnifiedCache::handleUnreferencedObject() const {
Mutex lock(&gCacheMutex);
--fNumValuesInUse;
_runEvictionSlice();
}
#ifdef UNIFIED_CACHE_DEBUG
#include <stdio.h>
@ -196,10 +202,10 @@ void UnifiedCache::_dumpContents() const {
++cnt;
fprintf(
stderr,
"Unified Cache: Key '%s', error %d, value %p, total refcount %d, soft refcount %d\n",
"Unified Cache: Key '%s', error %d, value %p, total refcount %d, soft refcount %d\n",
key->writeDescription(buffer, 256),
key->creationStatus,
sharedObject == gNoValue ? NULL :sharedObject,
sharedObject == fNoValue ? NULL :sharedObject,
sharedObject->getRefCount(),
sharedObject->getSoftRefCount());
}
@ -213,16 +219,17 @@ UnifiedCache::~UnifiedCache() {
flush();
{
// Now all that should be left in the cache are entries that refer to
// each other and entries with hard references from outside the cache.
// each other and entries with hard references from outside the cache.
// Nothing we can do about these so proceed to wipe out the cache.
Mutex lock(&gCacheMutex);
_flush(TRUE);
}
uhash_close(fHashtable);
fHashtable = nullptr;
delete fNoValue;
fNoValue = nullptr;
}
// Returns the next element in the cache round robin style.
// On entry, gCacheMutex must be held.
const UHashElement *
UnifiedCache::_nextElement() const {
const UHashElement *element = uhash_nextElement(fHashtable, &fEvictPos);
@ -233,46 +240,39 @@ UnifiedCache::_nextElement() const {
return element;
}
// Flushes the contents of the cache. If cache values hold references to other
// cache values then _flush should be called in a loop until it returns FALSE.
// On entry, gCacheMutex must be held.
// On exit, those values with are evictable are flushed. If all is true
// then every value is flushed even if it is not evictable.
// Returns TRUE if any value in cache was flushed or FALSE otherwise.
UBool UnifiedCache::_flush(UBool all) const {
UBool result = FALSE;
int32_t origSize = uhash_count(fHashtable);
for (int32_t i = 0; i < origSize; ++i) {
const UHashElement *element = _nextElement();
if (element == nullptr) {
break;
}
if (all || _isEvictable(element)) {
const SharedObject *sharedObject =
(const SharedObject *) element->value.pointer;
U_ASSERT(sharedObject->cachePtr == this);
uhash_removeElement(fHashtable, element);
sharedObject->removeSoftRef();
removeSoftRef(sharedObject); // Deletes the sharedObject when softRefCount goes to zero.
result = TRUE;
}
}
return result;
}
// Computes how many items should be evicted.
// On entry, gCacheMutex must be held.
// Returns number of items that should be evicted or a value <= 0 if no
// items need to be evicted.
int32_t UnifiedCache::_computeCountOfItemsToEvict() const {
int32_t maxPercentageOfInUseCount =
fItemsInUseCount * fMaxPercentageOfInUse / 100;
int32_t maxUnusedCount = fMaxUnused;
if (maxUnusedCount < maxPercentageOfInUseCount) {
maxUnusedCount = maxPercentageOfInUseCount;
}
return uhash_count(fHashtable) - fItemsInUseCount - maxUnusedCount;
int32_t totalItems = uhash_count(fHashtable);
int32_t evictableItems = totalItems - fNumValuesInUse;
U_ASSERT(totalItems >= fNumValuesInUse);
U_ASSERT(totalItems >= evictableItems);
int32_t unusedLimitByPercentage = fNumValuesInUse * fMaxPercentageOfInUse / 100;
int32_t unusedLimit = std::max(unusedLimitByPercentage, fMaxUnused);
int32_t countOfItemsToEvict = std::max(0, evictableItems - unusedLimit);
U_ASSERT(countOfItemsToEvict >= 0 && countOfItemsToEvict <= evictableItems);
return countOfItemsToEvict;
}
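// Editor's illustration (not part of this commit): a worked example of the
// eviction-target arithmetic above, using the default policy values defined
// near the top of this file and a hypothetical snapshot of the counters.
// The function name evictionTargetExample is hypothetical.
static void evictionTargetExample() {
    int32_t maxUnused = DEFAULT_MAX_UNUSED;                        // 1000
    int32_t maxPercentageOfInUse = DEFAULT_PERCENTAGE_OF_IN_USE;   // 100
    int32_t totalItems = 1300, numValuesInUse = 250;               // hypothetical snapshot
    int32_t evictableItems = totalItems - numValuesInUse;          // 1050
    int32_t unusedLimit = std::max(numValuesInUse * maxPercentageOfInUse / 100,
                                   maxUnused);                     // max(250, 1000) = 1000
    int32_t toEvict = std::max(0, evictableItems - unusedLimit);   // 1050 - 1000 = 50 entries
    (void)toEvict;
}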
// Run an eviction slice.
// On entry, gCacheMutex must be held.
// _runEvictionSlice runs a slice of the evict pipeline by examining the next
// 10 entries in the cache, round-robin style, evicting those that are eligible.
void UnifiedCache::_runEvictionSlice() const {
int32_t maxItemsToEvict = _computeCountOfItemsToEvict();
if (maxItemsToEvict <= 0) {
@ -280,11 +280,14 @@ void UnifiedCache::_runEvictionSlice() const {
}
for (int32_t i = 0; i < MAX_EVICT_ITERATIONS; ++i) {
const UHashElement *element = _nextElement();
if (element == nullptr) {
break;
}
if (_isEvictable(element)) {
const SharedObject *sharedObject =
(const SharedObject *) element->value.pointer;
uhash_removeElement(fHashtable, element);
sharedObject->removeSoftRef();
removeSoftRef(sharedObject); // Deletes sharedObject when softRefCount goes to zero.
++fAutoEvictedCount;
if (--maxItemsToEvict == 0) {
break;
@ -293,13 +296,8 @@ void UnifiedCache::_runEvictionSlice() const {
}
}
// Places a new value and creationStatus in the cache for the given key.
// On entry, gCacheMutex must be held. key must not exist in the cache.
// On exit, value and creation status placed under key. Soft reference added
// to value on successful add. On error sets status.
void UnifiedCache::_putNew(
const CacheKeyBase &key,
const CacheKeyBase &key,
const SharedObject *value,
const UErrorCode creationStatus,
UErrorCode &status) const {
@ -312,24 +310,17 @@ void UnifiedCache::_putNew(
return;
}
keyToAdopt->fCreationStatus = creationStatus;
if (value->noSoftReferences()) {
if (value->softRefCount == 0) {
_registerMaster(keyToAdopt, value);
}
uhash_put(fHashtable, keyToAdopt, (void *) value, &status);
void *oldValue = uhash_put(fHashtable, keyToAdopt, (void *) value, &status);
U_ASSERT(oldValue == nullptr);
(void)oldValue;
if (U_SUCCESS(status)) {
value->addSoftRef();
value->softRefCount++;
}
}
// Places value and status at key if there is no value at key or if cache
// entry for key is in progress. Otherwise, it leaves the current value and
// status there.
// On entry. gCacheMutex must not be held. value must be
// included in the reference count of the object to which it points.
// On exit, value and status are changed to what was already in the cache if
// something was there and not in progress. Otherwise, value and status are left
// unchanged in which case they are placed in the cache on a best-effort basis.
// Caller must call removeRef() on value.
void UnifiedCache::_putIfAbsentAndGet(
const CacheKeyBase &key,
const SharedObject *&value,
@ -352,15 +343,7 @@ void UnifiedCache::_putIfAbsentAndGet(
_runEvictionSlice();
}
// Attempts to fetch value and status for key from cache.
// On entry, gCacheMutex must not be held value must be NULL and status must
// be U_ZERO_ERROR.
// On exit, either returns FALSE (In this
// case caller should try to create the object) or returns TRUE with value
// pointing to the fetched value and status set to fetched status. When
// FALSE is returned status may be set to failure if an in progress hash
// entry could not be made but value will remain unchanged. When TRUE is
// returned, caler must call removeRef() on value.
UBool UnifiedCache::_poll(
const CacheKeyBase &key,
const SharedObject *&value,
@ -369,27 +352,29 @@ UBool UnifiedCache::_poll(
U_ASSERT(status == U_ZERO_ERROR);
Mutex lock(&gCacheMutex);
const UHashElement *element = uhash_find(fHashtable, &key);
while (element != NULL && _inProgress(element)) {
// If the hash table contains an inProgress placeholder entry for this key,
// this means that another thread is currently constructing the value object.
// Loop, waiting for that construction to complete.
while (element != NULL && _inProgress(element)) {
umtx_condWait(&gInProgressValueAddedCond, &gCacheMutex);
element = uhash_find(fHashtable, &key);
}
// If the hash table contains an entry for the key,
// fetch out the contents and return them.
if (element != NULL) {
_fetch(element, value, status);
_fetch(element, value, status);
return TRUE;
}
_putNew(key, gNoValue, U_ZERO_ERROR, status);
// The hash table contained nothing for this key.
// Insert an inProgress place holder value.
// Our caller will create the final value and update the hash table.
_putNew(key, fNoValue, U_ZERO_ERROR, status);
return FALSE;
}
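// Editor's illustration (not part of this commit): the same "in-progress
// placeholder" protocol sketched with standard C++ primitives instead of
// ICU's gCacheMutex / gInProgressValueAddedCond. All names are hypothetical.
//
//   #include <condition_variable>
//   #include <map>
//   #include <mutex>
//   #include <string>
//
//   struct MiniCache {
//       std::mutex lock;
//       std::condition_variable ready;
//       std::map<std::string, const void *> table;    // nullptr value == in progress
//
//       // Returns the cached value, or reserves an in-progress slot and asks
//       // the caller to create the value (mustCreate == true).
//       const void *getOrReserve(const std::string &key, bool &mustCreate) {
//           std::unique_lock<std::mutex> guard(lock);
//           auto it = table.find(key);
//           while (it != table.end() && it->second == nullptr) {
//               ready.wait(guard);                     // another thread is creating it
//               it = table.find(key);
//           }
//           if (it != table.end()) { mustCreate = false; return it->second; }
//           table[key] = nullptr;                      // reserve the in-progress slot
//           mustCreate = true;
//           return nullptr;
//       }
//
//       void publish(const std::string &key, const void *value) {
//           { std::lock_guard<std::mutex> guard(lock); table[key] = value; }
//           ready.notify_all();                        // like umtx_condBroadcast() in _put()
//       }
//   };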
// Gets value out of cache.
// On entry. gCacheMutex must not be held. value must be NULL. status
// must be U_ZERO_ERROR.
// On exit. value and status set to what is in cache at key or on cache
// miss the key's createObject() is called and value and status are set to
// the result of that. In this latter case, best effort is made to add the
// value and status to the cache. If createObject() fails to create a value,
// gNoValue is stored in cache, and value is set to NULL. Caller must call
// removeRef on value if non NULL.
void UnifiedCache::_get(
const CacheKeyBase &key,
const SharedObject *&value,
@ -398,7 +383,7 @@ void UnifiedCache::_get(
U_ASSERT(value == NULL);
U_ASSERT(status == U_ZERO_ERROR);
if (_poll(key, value, status)) {
if (value == gNoValue) {
if (value == fNoValue) {
SharedObject::clearPtr(value);
}
return;
@ -410,134 +395,76 @@ void UnifiedCache::_get(
U_ASSERT(value == NULL || value->hasHardReferences());
U_ASSERT(value != NULL || status != U_ZERO_ERROR);
if (value == NULL) {
SharedObject::copyPtr(gNoValue, value);
SharedObject::copyPtr(fNoValue, value);
}
_putIfAbsentAndGet(key, value, status);
if (value == gNoValue) {
if (value == fNoValue) {
SharedObject::clearPtr(value);
}
}
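// Editor's illustration (not part of this commit): what the _get() path above
// looks like from the client side. LocaleCacheKey and the UCTItem value type
// are borrowed from the unit tests further down; the function name
// clientGetSketch is hypothetical.
static void clientGetSketch(UErrorCode &status) {
    UnifiedCache *cache = UnifiedCache::getInstance(status);
    if (U_FAILURE(status)) { return; }
    const UCTItem *item = NULL;
    cache->get(LocaleCacheKey<UCTItem>("en_GB"), item, status);   // takes a hard reference
    if (U_SUCCESS(status) && item != NULL) {
        // ... use item ...
        item->removeRef();   // last external reference -> handleUnreferencedObject()
    }
}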
void UnifiedCache::decrementItemsInUseWithLockingAndEviction() const {
Mutex mutex(&gCacheMutex);
decrementItemsInUse();
_runEvictionSlice();
}
void UnifiedCache::incrementItemsInUse() const {
++fItemsInUseCount;
}
void UnifiedCache::decrementItemsInUse() const {
--fItemsInUseCount;
}
// Register a master cache entry.
// On entry, gCacheMutex must be held.
// On exit, items in use count incremented, entry is marked as a master
// entry, and value registered with cache so that subsequent calls to
// addRef() and removeRef() on it correctly updates items in use count
void UnifiedCache::_registerMaster(
const CacheKeyBase *theKey, const SharedObject *value) const {
theKey->fIsMaster = TRUE;
++fItemsInUseCount;
value->registerWithCache(this);
const CacheKeyBase *theKey, const SharedObject *value) const {
theKey->fIsMaster = true;
value->cachePtr = this;
++fNumValuesTotal;
++fNumValuesInUse;
}
// Store a value and error in given hash entry.
// On entry, gCacheMutex must be held. Hash entry element must be in progress.
// value must be non NULL.
// On Exit, soft reference added to value. value and status stored in hash
// entry. Soft reference removed from previous stored value. Waiting
// threads notified.
void UnifiedCache::_put(
const UHashElement *element,
const UHashElement *element,
const SharedObject *value,
const UErrorCode status) const {
U_ASSERT(_inProgress(element));
const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;
const SharedObject *oldValue = (const SharedObject *) element->value.pointer;
theKey->fCreationStatus = status;
if (value->noSoftReferences()) {
if (value->softRefCount == 0) {
_registerMaster(theKey, value);
}
value->addSoftRef();
value->softRefCount++;
UHashElement *ptr = const_cast<UHashElement *>(element);
ptr->value.pointer = (void *) value;
oldValue->removeSoftRef();
U_ASSERT(oldValue == fNoValue);
removeSoftRef(oldValue);
// Tell waiting threads that the in-progress placeholder has been
// replaced with the final value and creation status.
umtx_condBroadcast(&gInProgressValueAddedCond);
}
void
UnifiedCache::copyPtr(const SharedObject *src, const SharedObject *&dest) {
if(src != dest) {
if(dest != NULL) {
dest->removeRefWhileHoldingCacheLock();
}
dest = src;
if(src != NULL) {
src->addRefWhileHoldingCacheLock();
}
}
}
void
UnifiedCache::clearPtr(const SharedObject *&ptr) {
if (ptr != NULL) {
ptr->removeRefWhileHoldingCacheLock();
ptr = NULL;
}
}
// Fetch value and error code from a particular hash entry.
// On entry, gCacheMutex must be held. value must be either NULL or must be
// included in the ref count of the object to which it points.
// On exit, value and status set to what is in the hash entry. Caller must
// eventually call removeRef on value.
// If hash entry is in progress, value will be set to gNoValue and status will
// be set to U_ZERO_ERROR.
void UnifiedCache::_fetch(
const UHashElement *element,
const SharedObject *&value,
UErrorCode &status) {
UErrorCode &status) const {
const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;
status = theKey->fCreationStatus;
// Since we have the cache lock, calling regular SharedObject methods
// Since we have the cache lock, calling regular SharedObject add/removeRef
// could cause us to deadlock on ourselves since they may need to lock
// the cache mutex.
UnifiedCache::copyPtr((const SharedObject *) element->value.pointer, value);
removeHardRef(value);
value = static_cast<const SharedObject *>(element->value.pointer);
addHardRef(value);
}
// Determine if given hash entry is in progress.
// On entry, gCacheMutex must be held.
UBool UnifiedCache::_inProgress(const UHashElement *element) {
const SharedObject *value = NULL;
UBool UnifiedCache::_inProgress(const UHashElement* element) const {
UErrorCode status = U_ZERO_ERROR;
const SharedObject * value = NULL;
_fetch(element, value, status);
UBool result = _inProgress(value, status);
// Since we have the cache lock, calling regular SharedObject methods
// could cause us to deadlock on ourselves since they may need to lock
// the cache mutex.
UnifiedCache::clearPtr(value);
removeHardRef(value);
return result;
}
// Determine if given hash entry is in progress.
// On entry, gCacheMutex must be held.
UBool UnifiedCache::_inProgress(
const SharedObject *theValue, UErrorCode creationStatus) {
return (theValue == gNoValue && creationStatus == U_ZERO_ERROR);
const SharedObject* theValue, UErrorCode creationStatus) const {
return (theValue == fNoValue && creationStatus == U_ZERO_ERROR);
}
// Determine if given hash entry is eligible for eviction.
// On entry, gCacheMutex must be held.
UBool UnifiedCache::_isEvictable(const UHashElement *element) {
UBool UnifiedCache::_isEvictable(const UHashElement *element) const
{
const CacheKeyBase *theKey = (const CacheKeyBase *) element->key.pointer;
const SharedObject *theValue =
(const SharedObject *) element->value.pointer;
@ -549,7 +476,47 @@ UBool UnifiedCache::_isEvictable(const UHashElement *element) {
// We can evict entries that are either not a master or have just
// one reference (The one reference being from the cache itself).
return (!theKey->fIsMaster || (theValue->getSoftRefCount() == 1 && theValue->noHardReferences()));
return (!theKey->fIsMaster || (theValue->softRefCount == 1 && theValue->noHardReferences()));
}
void UnifiedCache::removeSoftRef(const SharedObject *value) const {
U_ASSERT(value->cachePtr == this);
U_ASSERT(value->softRefCount > 0);
if (--value->softRefCount == 0) {
--fNumValuesTotal;
if (value->noHardReferences()) {
delete value;
} else {
// This path only happens from flush(all), which in turn only happens from the
// UnifiedCache destructor. Nulling out value->cachePtr changes the behavior
// of value->removeRef(), causing the deletion to be done there.
value->cachePtr = nullptr;
}
}
}
int32_t UnifiedCache::removeHardRef(const SharedObject *value) const {
int refCount = 0;
if (value) {
refCount = umtx_atomic_dec(&value->hardRefCount);
U_ASSERT(refCount >= 0);
if (refCount == 0) {
--fNumValuesInUse;
}
}
return refCount;
}
int32_t UnifiedCache::addHardRef(const SharedObject *value) const {
int refCount = 0;
if (value) {
refCount = umtx_atomic_inc(&value->hardRefCount);
U_ASSERT(refCount >= 1);
if (refCount == 1) {
fNumValuesInUse++;
}
}
return refCount;
}
U_NAMESPACE_END

View file

@ -190,7 +190,7 @@ class U_COMMON_API UnifiedCache : public UnifiedCacheBase {
UnifiedCache(UErrorCode &status);
/**
* Returns the cache instance.
* Return a pointer to the global cache instance.
*/
static UnifiedCache *getInstance(UErrorCode &status);
@ -294,7 +294,7 @@ class U_COMMON_API UnifiedCache : public UnifiedCacheBase {
/**
* Configures at what point eviction of unused entries will begin.
* Eviction is triggered whenever the number of unused entries exceeds
* Eviction is triggered whenever the number of evictable keys exceeds
* BOTH count AND (number of in-use items) * (percentageOfInUseItems / 100).
* Once the number of unused entries drops below one of these,
* eviction ceases. Because eviction happens incrementally,
@ -341,60 +341,214 @@ class U_COMMON_API UnifiedCache : public UnifiedCacheBase {
*/
int32_t unusedCount() const;
virtual void incrementItemsInUse() const;
virtual void decrementItemsInUseWithLockingAndEviction() const;
virtual void decrementItemsInUse() const;
virtual void handleUnreferencedObject() const;
virtual ~UnifiedCache();
private:
UHashtable *fHashtable;
mutable int32_t fEvictPos;
mutable int32_t fItemsInUseCount;
mutable int32_t fNumValuesTotal;
mutable int32_t fNumValuesInUse;
int32_t fMaxUnused;
int32_t fMaxPercentageOfInUse;
mutable int64_t fAutoEvictedCount;
SharedObject *fNoValue;
UnifiedCache(const UnifiedCache &other);
UnifiedCache &operator=(const UnifiedCache &other);
/**
* Flushes the contents of the cache. If cache values hold references to other
* cache values then _flush should be called in a loop until it returns FALSE.
*
* On entry, gCacheMutex must be held.
* On exit, those values which are evictable are flushed.
*
* @param all if false, flush evictable items only, which are those with no external
* references, plus those that can be safely recreated.<br>
* if true, flush all elements. Any values (SharedObjects) with remaining
* hard (external) references are not deleted, but are detached from
* the cache, so that a subsequent removeRef() can delete them.
* _flush is not thread-safe when all is true.
* @return TRUE if any value in cache was flushed or FALSE otherwise.
*/
UBool _flush(UBool all) const;
/**
* Gets value out of cache.
* On entry, gCacheMutex must not be held, value must be NULL, and status
* must be U_ZERO_ERROR.
* On exit, value and status are set to what is in the cache at key; on a cache
* miss the key's createObject() is called and value and status are set to
* the result of that. In this latter case, a best effort is made to add the
* value and status to the cache. If createObject() fails to create a value,
* fNoValue is stored in the cache, and value is set to NULL. Caller must call
* removeRef() on value if it is non-NULL.
*/
void _get(
const CacheKeyBase &key,
const SharedObject *&value,
const void *creationContext,
UErrorCode &status) const;
UBool _poll(
const CacheKeyBase &key,
const SharedObject *&value,
UErrorCode &status) const;
void _putNew(
const CacheKeyBase &key,
const SharedObject *value,
const UErrorCode creationStatus,
UErrorCode &status) const;
/**
* Attempts to fetch value and status for key from cache.
* On entry, gCacheMutex must not be held, value must be NULL, and status must
* be U_ZERO_ERROR.
* On exit, either returns FALSE (in this
* case the caller should try to create the object) or returns TRUE with value
* pointing to the fetched value and status set to the fetched status. When
* FALSE is returned, status may be set to failure if an in-progress hash
* entry could not be made, but value will remain unchanged. When TRUE is
* returned, the caller must call removeRef() on value.
*/
UBool _poll(
const CacheKeyBase &key,
const SharedObject *&value,
UErrorCode &status) const;
/**
* Places a new value and creationStatus in the cache for the given key.
* On entry, gCacheMutex must be held. key must not exist in the cache.
* On exit, value and creation status are placed under key, and a soft reference
* is added to value on a successful add. On error, sets status.
*/
void _putNew(
const CacheKeyBase &key,
const SharedObject *value,
const UErrorCode creationStatus,
UErrorCode &status) const;
/**
* Places value and status at key if there is no value at key or if cache
* entry for key is in progress. Otherwise, it leaves the current value and
* status there.
*
* On entry, gCacheMutex must not be held. Value must be
* included in the reference count of the object to which it points.
*
* On exit, value and status are changed to what was already in the cache if
* something was there and not in progress. Otherwise, value and status are left
* unchanged, in which case they are placed in the cache on a best-effort basis.
* Caller must call removeRef() on value.
*/
void _putIfAbsentAndGet(
const CacheKeyBase &key,
const SharedObject *&value,
UErrorCode &status) const;
const UHashElement *_nextElement() const;
/**
* Returns the next element in the cache round robin style.
* Returns nullptr if the cache is empty.
* On entry, gCacheMutex must be held.
*/
const UHashElement *_nextElement() const;
/**
* Return the number of cache items that would need to be evicted
* to bring usage into conformance with eviction policy.
*
* An item corresponds to an entry in the hash table, a hash table element.
*
* On entry, gCacheMutex must be held.
*/
int32_t _computeCountOfItemsToEvict() const;
/**
* Run an eviction slice.
* On entry, gCacheMutex must be held.
* _runEvictionSlice runs a slice of the evict pipeline by examining the next
* 10 entries in the cache, round-robin style, evicting those that are eligible.
*/
void _runEvictionSlice() const;
void _registerMaster(
const CacheKeyBase *theKey, const SharedObject *value) const;
/**
* Register a master cache entry. A master key is the first key to create
* a given SharedObject value. Subsequent keys whose create function
* produces references to an already existing SharedObject are not masters -
* they can be evicted and subsequently recreated.
*
* On entry, gCacheMutex must be held.
* On exit, items in use count incremented, entry is marked as a master
* entry, and value registered with cache so that subsequent calls to
* addRef() and removeRef() on it correctly interact with the cache.
*/
void _registerMaster(const CacheKeyBase *theKey, const SharedObject *value) const;
/**
* Store a value and creation error status in given hash entry.
* On entry, gCacheMutex must be held. Hash entry element must be in progress.
* value must be non-NULL.
* On exit, a soft reference is added to value, and value and status are stored
* in the hash entry. A soft reference is removed from the previously stored
* value. Waiting threads are notified.
*/
void _put(
const UHashElement *element,
const SharedObject *value,
const UErrorCode status) const;
/**
* Remove a soft reference, and delete the SharedObject if no references remain.
* To be used from within the UnifiedCache implementation only.
* gCacheMutex must be held by caller.
* @param value the SharedObject to be acted on.
*/
void removeSoftRef(const SharedObject *value) const;
/**
* Increment the hard reference count of the given SharedObject.
* gCacheMutex must be held by the caller.
* Updates fNumValuesInUse on the transition from zero to one reference.
*
* @param value The SharedObject to be referenced.
* @return the hard reference count after the addition.
*/
int32_t addHardRef(const SharedObject *value) const;
/**
* Decrement the hard reference count of the given SharedObject.
* gCacheMutex must be held by the caller.
* Updates fNumValuesInUse on the transition from one to zero references.
*
* @param value The SharedObject to be referenced.
* @return the hard reference count after the removal.
*/
int32_t removeHardRef(const SharedObject *value) const;
#ifdef UNIFIED_CACHE_DEBUG
void _dumpContents() const;
#endif
static void copyPtr(const SharedObject *src, const SharedObject *&dest);
static void clearPtr(const SharedObject *&ptr);
static void _fetch(
const UHashElement *element,
const SharedObject *&value,
UErrorCode &status);
static UBool _inProgress(const UHashElement *element);
static UBool _inProgress(
const SharedObject *theValue, UErrorCode creationStatus);
static UBool _isEvictable(const UHashElement *element);
/**
* Fetch value and error code from a particular hash entry.
* On entry, gCacheMutex must be held. value must be either NULL or must be
* included in the ref count of the object to which it points.
* On exit, value and status set to what is in the hash entry. Caller must
* eventually call removeRef on value.
* If the hash entry is in progress, value will be set to fNoValue and status will
* be set to U_ZERO_ERROR.
*/
void _fetch(const UHashElement *element, const SharedObject *&value,
UErrorCode &status) const;
/**
* Determine if given hash entry is in progress.
* On entry, gCacheMutex must be held.
*/
UBool _inProgress(const UHashElement *element) const;
/**
* Determine if given hash entry is in progress.
* On entry, gCacheMutex must be held.
*/
UBool _inProgress(const SharedObject *theValue, UErrorCode creationStatus) const;
/**
* Determine if given hash entry is eligible for eviction.
* On entry, gCacheMutex must be held.
*/
UBool _isEvictable(const UHashElement *element) const;
};
U_NAMESPACE_END

View file

@ -448,7 +448,8 @@ struct FormatThreadTestData
// "Someone from {2} is receiving a #{0} error - {1}. Their telephone call is costing {3 number,currency}."
static void formatErrorMessage(UErrorCode &realStatus, const UnicodeString& pattern, const Locale& theLocale,
UErrorCode inStatus0, /* statusString 1 */ const Locale &inCountry2, double currency3, // these numbers are the message arguments.
UErrorCode inStatus0, // statusString 1
const Locale &inCountry2, double currency3, // these numbers are the message arguments.
UnicodeString &result)
{
if(U_FAILURE(realStatus))
@ -666,13 +667,13 @@ public:
// Keep this data here to avoid static initialization.
FormatThreadTestData kNumberFormatTestData[] =
{
FormatThreadTestData((double)5.0, UnicodeString("5", "")),
FormatThreadTestData( 6.0, UnicodeString("6", "")),
FormatThreadTestData( 20.0, UnicodeString("20", "")),
FormatThreadTestData( 8.0, UnicodeString("8", "")),
FormatThreadTestData( 8.3, UnicodeString("8.3", "")),
FormatThreadTestData( 12345, UnicodeString("12,345", "")),
FormatThreadTestData( 81890.23, UnicodeString("81,890.23", "")),
FormatThreadTestData((double)5.0, UnicodeString(u"5")),
FormatThreadTestData( 6.0, UnicodeString(u"6")),
FormatThreadTestData( 20.0, UnicodeString(u"20")),
FormatThreadTestData( 8.0, UnicodeString(u"8")),
FormatThreadTestData( 8.3, UnicodeString(u"8.3")),
FormatThreadTestData( 12345, UnicodeString(u"12,345")),
FormatThreadTestData( 81890.23, UnicodeString(u"81,890.23")),
};
int32_t kNumberFormatTestDataLength = UPRV_LENGTHOF(kNumberFormatTestData);
@ -1388,7 +1389,7 @@ const UCTMultiThreadItem *LocaleCacheKey<UCTMultiThreadItem>::createObject(
} else {
result->addRef();
}
// Log that we created an object. The first object was already counted,
// don't do it again.
umtx_lock(&gCTMutex);
@ -1451,7 +1452,7 @@ void UnifiedCacheThread::exerciseByLocale(const Locale &locale) {
void UnifiedCacheThread::run() {
// Run the exercise with 2 different locales so that we can exercise
// eviction more. If each thread exerices just one locale, then
// eviction more. If each thread exercises just one locale, then
// eviction can't start until the threads end.
exerciseByLocale(fLoc);
exerciseByLocale(fLoc2);

View file

@ -153,8 +153,8 @@ void UnifiedCacheTest::TestEvictionPolicy() {
unusedReference->removeRef();
// unused count not to exceed in use count
assertEquals("", UPRV_LENGTHOF(usedReferences), cache.unusedCount());
assertEquals("", 2*UPRV_LENGTHOF(usedReferences), cache.keyCount());
assertEquals("T1", UPRV_LENGTHOF(usedReferences), cache.unusedCount());
assertEquals("T2", 2*UPRV_LENGTHOF(usedReferences), cache.keyCount());
// Free up those used entries.
for (int32_t i = 0; i < UPRV_LENGTHOF(usedReferences); i++) {
@ -162,9 +162,9 @@ void UnifiedCacheTest::TestEvictionPolicy() {
}
// This should free up all cache items
assertEquals("", 0, cache.keyCount());
assertEquals("T3", 0, cache.keyCount());
assertSuccess("", status);
assertSuccess("T4", status);
}
@ -181,7 +181,7 @@ void UnifiedCacheTest::TestBounded() {
// complete control over it. Real clients should never ever create
// their own cache!
UnifiedCache cache(status);
assertSuccess("", status);
assertSuccess("T0", status);
// Maximum unused count is 3.
cache.setEvictionPolicy(3, 0, status);
@ -202,15 +202,15 @@ void UnifiedCacheTest::TestBounded() {
const UCTItem *frFr = NULL;
cache.get(LocaleCacheKey<UCTItem>("en_US"), &cache, enUs, status);
cache.get(LocaleCacheKey<UCTItem>("en"), &cache, en, status);
assertEquals("", 1, cache.unusedCount());
assertEquals("T1", 1, cache.unusedCount());
cache.get(LocaleCacheKey<UCTItem>("en_GB"), &cache, enGb, status);
cache.get(LocaleCacheKey<UCTItem>("fr_FR"), &cache, frFr, status);
cache.get(LocaleCacheKey<UCTItem>("fr"), &cache, fr, status);
// Client holds two unique references, "en" and "fr"; the other three
// entries are eligible for eviction.
assertEquals("", 3, cache.unusedCount());
assertEquals("", 5, cache.keyCount());
assertEquals("T2", 3, cache.unusedCount());
assertEquals("T3", 5, cache.keyCount());
// Exercise cache more but don't hold the references except for
// the last one. At the end of this, we will hold references to one
@ -227,40 +227,40 @@ void UnifiedCacheTest::TestBounded() {
// Client holds three unique references, "en", "fr", and "de". Although we
// could have a total of 8 entries in the cache, maxUnusedCount == 3,
// so we have only 6 entries.
assertEquals("", 3, cache.unusedCount());
assertEquals("", 6, cache.keyCount());
assertEquals("T4", 3, cache.unusedCount());
assertEquals("T5", 6, cache.keyCount());
// For all the references we have, cache must continue to return
// those same references (#2)
cache.get(LocaleCacheKey<UCTItem>("en"), &cache, throwAway, status);
if (throwAway != en) {
errln("Expected en to resolve to the same object.");
errln("T6: Expected en to resolve to the same object.");
}
cache.get(LocaleCacheKey<UCTItem>("en_US"), &cache, throwAway, status);
if (throwAway != enUs) {
errln("Expected enUs to resolve to the same object.");
errln("T7: Expected enUs to resolve to the same object.");
}
cache.get(LocaleCacheKey<UCTItem>("en_GB"), &cache, throwAway, status);
if (throwAway != enGb) {
errln("Expected enGb to resolve to the same object.");
errln("T8: Expected enGb to resolve to the same object.");
}
cache.get(LocaleCacheKey<UCTItem>("fr_FR"), &cache, throwAway, status);
if (throwAway != frFr) {
errln("Expected frFr to resolve to the same object.");
errln("T9: Expected frFr to resolve to the same object.");
}
cache.get(LocaleCacheKey<UCTItem>("fr_FR"), &cache, throwAway, status);
cache.get(LocaleCacheKey<UCTItem>("fr"), &cache, throwAway, status);
if (throwAway != fr) {
errln("Expected fr to resolve to the same object.");
errln("T10: Expected fr to resolve to the same object.");
}
cache.get(LocaleCacheKey<UCTItem>("de_AU"), &cache, throwAway, status);
if (throwAway != deAu) {
errln("Expected deAu to resolve to the same object.");
errln("T11: Expected deAu to resolve to the same object.");
}
assertEquals("", 3, cache.unusedCount());
assertEquals("", 6, cache.keyCount());
assertEquals("T12", 3, cache.unusedCount());
assertEquals("T13", 6, cache.keyCount());
// Now we hold references to two more distinct values. Cache size
// should grow to 8.
@ -268,8 +268,8 @@ void UnifiedCacheTest::TestBounded() {
const UCTItem *ru = NULL;
cache.get(LocaleCacheKey<UCTItem>("es"), &cache, es, status);
cache.get(LocaleCacheKey<UCTItem>("ru"), &cache, ru, status);
assertEquals("", 3, cache.unusedCount());
assertEquals("", 8, cache.keyCount());
assertEquals("T14", 3, cache.unusedCount());
assertEquals("T15", 8, cache.keyCount());
// Now release all the references we hold except for
// es, ru, and en
@ -284,13 +284,13 @@ void UnifiedCacheTest::TestBounded() {
SharedObject::clearPtr(throwAway);
// Size of cache should magically drop to 3.
assertEquals("", 3, cache.unusedCount());
assertEquals("", 3, cache.keyCount());
assertEquals("T16", 3, cache.unusedCount());
assertEquals("T17", 3, cache.keyCount());
// Be sure nothing happens setting the eviction policy in the middle of
// a run.
cache.setEvictionPolicy(3, 0, status);
assertSuccess("", status);
assertSuccess("T18", status);
}
@ -311,7 +311,7 @@ void UnifiedCacheTest::TestBasic() {
cache->get(LocaleCacheKey<UCTItem>("en_GB"), enGb, status);
cache->get(LocaleCacheKey<UCTItem>("fr_FR"), frFr, status);
cache->get(LocaleCacheKey<UCTItem>("fr"), fr, status);
cache->get(LocaleCacheKey<UCTItem>("en_GB"), enGb2, status);
cache->get(LocaleCacheKey<UCTItem>("en_GB"), enGb2, status);
SharedObject::clearPtr(enGb2);
if (enGb != enUs) {
errln("Expected en_GB and en_US to resolve to same object.");
@ -322,16 +322,16 @@ void UnifiedCacheTest::TestBasic() {
if (enGb == fr) {
errln("Expected en_GB and fr to return different objects.");
}
assertSuccess("", status);
assertSuccess("T1", status);
// en_US, en_GB, en share one object; fr_FR and fr don't share.
// 5 keys in all.
assertEquals("", baseCount + 5, cache->keyCount());
assertEquals("T2", baseCount + 5, cache->keyCount());
SharedObject::clearPtr(enGb);
cache->flush();
// Only 2 unique values in the cache. flushing trims cache down
// to this minimum size.
assertEquals("", baseCount + 2, cache->keyCount());
assertEquals("T3", baseCount + 2, cache->keyCount());
SharedObject::clearPtr(enUs);
SharedObject::clearPtr(en);
cache->flush();
@ -339,14 +339,14 @@ void UnifiedCacheTest::TestBasic() {
// the "en" object, so it gets flushed and the keys that refer to it
// get removed from the cache. Now we have just one unique value, fr, in
// the cache
assertEquals("", baseCount + 1, cache->keyCount());
assertEquals("T4", baseCount + 1, cache->keyCount());
SharedObject::clearPtr(fr);
cache->flush();
assertEquals("", baseCount + 1, cache->keyCount());
assertEquals("T5", baseCount + 1, cache->keyCount());
SharedObject::clearPtr(frFr);
cache->flush();
assertEquals("", baseCount + 0, cache->keyCount());
assertSuccess("", status);
assertEquals("T6", baseCount + 0, cache->keyCount());
assertSuccess("T7", status);
}
void UnifiedCacheTest::TestError() {