author     Stephan Bergmann <sbergman@redhat.com>    2018-07-31 19:14:14 +0200
committer  Stephan Bergmann <sbergman@redhat.com>    2018-08-01 00:30:17 +0200
commit     df6ba650469a6f2fda06ef1c2e107ccdd3570505 (patch)
tree       622799a3ee83fcf192e0ff4c75a6f5d7f3f865f4 /sal/rtl
parent     74ea0faf1f5556a09f1cc5acb89ba54bfbd586b9 (diff)
Remove "officially dead now" rtl_cache slab allocator mechanism
...after <https://gerrit.libreoffice.org/#/c/58263/> "the custom SAL allocator is no longer used" got merged. According to my July 31, 2018 comment there: "However, [...] this change is effectively a final coffin nail for the 'rtl_cache' mechanism. It could be argued that the alleged benefits of that mechanism (if it were still working) might be real (at least on some platforms; which would need measurements), and that it should be made working again (by reverting the effects of both ce906b8096081dee15dc8cc96e570d5b0b587955 'skip tricky allocators on G_SLICE=always-malloc' and bc6a5d8e79e7d0e7d75ac107aa8e6aa275e434e9 'Disable custom allocator' on rtl_cache_alloc/free again). But it could just as well be argued that the 'rtl_cache' mechanism is effectively gone for long enough now (since end of November, 2017, with bc6a5d8e79e7d0e7d75ac107aa8e6aa275e434e9 'Disable custom allocator') without any (apparent) negative consequences, so that it can be removed for good with this change."

Change-Id: I8c1e45d494fc22555a9e675ab27be9e6e404abde
Reviewed-on: https://gerrit.libreoffice.org/58369
Tested-by: Jenkins
Reviewed-by: Stephan Bergmann <sbergman@redhat.com>
Diffstat (limited to 'sal/rtl')
-rw-r--r--  sal/rtl/alloc_cache.cxx  953
-rw-r--r--  sal/rtl/alloc_cache.hxx  116
-rw-r--r--  sal/rtl/strimp.cxx        10
3 files changed, 39 insertions, 1040 deletions
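
[Editor's note] For readers unfamiliar with the mechanism this commit deletes: a slab-style object cache hands out fixed-size objects carved from larger chunks ("slabs") and recycles freed objects on a free list, so most allocations bypass the general-purpose allocator. The following is a minimal illustrative sketch of that idea only; it is not the removed rtl_cache code, and every identifier in it is made up.

    // Toy slab-style object cache (illustration only, not the removed
    // rtl_cache implementation; all identifiers here are hypothetical).
    #include <cstddef>
    #include <new>
    #include <vector>

    template <typename T, std::size_t ObjectsPerSlab = 64>
    class ToySlabCache {
    public:
        T* alloc() {
            if (m_free.empty())
                grow();                 // carve a fresh slab into object slots
            void* p = m_free.back();
            m_free.pop_back();
            return new (p) T();         // construct in place
        }
        void free(T* obj) {
            obj->~T();                  // destroy, but keep the raw slot cached
            m_free.push_back(obj);
        }
        ~ToySlabCache() {
            for (void* slab : m_slabs)  // slabs are returned only at shutdown
                ::operator delete(slab);
        }
    private:
        void grow() {
            void* slab = ::operator new(sizeof(T) * ObjectsPerSlab);
            m_slabs.push_back(slab);
            char* base = static_cast<char*>(slab);
            for (std::size_t i = 0; i != ObjectsPerSlab; ++i)
                m_free.push_back(base + i * sizeof(T));
        }
        std::vector<void*> m_slabs;     // whole slabs
        std::vector<void*> m_free;      // per-object free list
    };

The removed code in alloc_cache.cxx below layers considerably more on top of this basic idea: per-CPU "magazines", a depot of full/empty magazines, a hash table mapping buffer addresses to bufctl records, and a background thread (rtl_cache_wsupdate_all) that periodically trims the working set.
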
diff --git a/sal/rtl/alloc_cache.cxx b/sal/rtl/alloc_cache.cxx
index 28add71c7860..b4d7a0060d39 100644
--- a/sal/rtl/alloc_cache.cxx
+++ b/sal/rtl/alloc_cache.cxx
@@ -21,38 +21,11 @@
#include "alloc_impl.hxx"
#include "alloc_arena.hxx"
#include <rtllifecycle.h>
-#include <sal/macros.h>
-#include <osl/thread.hxx>
#include <cassert>
#include <string.h>
#include <stdio.h>
-#if defined(SAL_UNX)
-#include <sys/time.h>
-#endif
-#include <algorithm>
-
-/**
- * @internal
- */
-struct rtl_cache_list_st
-{
- rtl_memory_lock_type m_lock;
- rtl_cache_type m_cache_head;
-
-#if defined(SAL_UNX)
- pthread_t m_update_thread;
- pthread_cond_t m_update_cond;
-#elif defined(_WIN32)
- HANDLE m_update_thread;
- HANDLE m_update_cond;
-#endif /* SAL_UNX || _WIN32 */
- int m_update_done;
-};
-
-static rtl_cache_list_st g_cache_list;
-
/**
provided for cache_type allocations, and hash_table resizing.
@@ -60,379 +33,9 @@ static rtl_cache_list_st g_cache_list;
*/
static rtl_arena_type * gp_cache_arena = nullptr;
-/**
- @internal
-*/
-static rtl_cache_type * gp_cache_magazine_cache = nullptr;
-
-/**
- @internal
-*/
-static rtl_cache_type * gp_cache_slab_cache = nullptr;
-
-/**
- @internal
-*/
-static rtl_cache_type * gp_cache_bufctl_cache = nullptr;
-
-#define RTL_CACHE_HASH_INDEX_IMPL(a, s, q, m) \
- ((((a) + ((a) >> (s)) + ((a) >> ((s) << 1))) >> (q)) & (m))
-
-#define RTL_CACHE_HASH_INDEX(cache, addr) \
- RTL_CACHE_HASH_INDEX_IMPL((addr), (cache)->m_hash_shift, (cache)->m_type_shift, ((cache)->m_hash_size - 1))
-
namespace
{
-void rtl_cache_hash_rescale(
- rtl_cache_type * cache,
- sal_Size new_size
-)
-{
- rtl_cache_bufctl_type ** new_table;
- sal_Size new_bytes;
-
- new_bytes = new_size * sizeof(rtl_cache_bufctl_type*);
- new_table = static_cast<rtl_cache_bufctl_type**>(rtl_arena_alloc(gp_cache_arena, &new_bytes));
-
- if (new_table)
- {
- rtl_cache_bufctl_type ** old_table;
- sal_Size old_size, i;
-
- memset (new_table, 0, new_bytes);
-
- RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
-
- old_table = cache->m_hash_table;
- old_size = cache->m_hash_size;
-
- cache->m_hash_table = new_table;
- cache->m_hash_size = new_size;
- const auto bit = highbit(cache->m_hash_size);
- assert(bit > 0);
- cache->m_hash_shift = bit - 1;
-
- for (i = 0; i < old_size; i++)
- {
- rtl_cache_bufctl_type * curr = old_table[i];
- while (curr)
- {
- rtl_cache_bufctl_type * next = curr->m_next;
- rtl_cache_bufctl_type ** head;
-
- head = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, curr->m_addr)]);
- curr->m_next = (*head);
- (*head) = curr;
-
- curr = next;
- }
- old_table[i] = nullptr;
- }
-
- RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
-
- if (old_table != cache->m_hash_table_0)
- {
- sal_Size old_bytes = old_size * sizeof(rtl_cache_bufctl_type*);
- rtl_arena_free (gp_cache_arena, old_table, old_bytes);
- }
- }
-}
-
-rtl_cache_bufctl_type * rtl_cache_hash_remove(
- rtl_cache_type * cache,
- sal_uIntPtr addr
-)
-{
- rtl_cache_bufctl_type ** ppHead;
- rtl_cache_bufctl_type * bufctl;
- sal_Size lookups = 0;
-
- ppHead = &(cache->m_hash_table[RTL_CACHE_HASH_INDEX(cache, addr)]);
- while ((bufctl = *ppHead))
- {
- if (bufctl->m_addr == addr)
- {
- *ppHead = bufctl->m_next;
- bufctl->m_next = nullptr;
- break;
- }
-
- lookups += 1;
- ppHead = &(bufctl->m_next);
- }
-
- assert(bufctl); // bad free
-
- if (lookups > 1)
- {
- sal_Size nbuf = static_cast<sal_Size>(cache->m_slab_stats.m_alloc - cache->m_slab_stats.m_free);
- if (nbuf > 4 * cache->m_hash_size)
- {
- if (!(cache->m_features & RTL_CACHE_FEATURE_RESCALE))
- {
- sal_Size ave = nbuf >> cache->m_hash_shift;
- const auto bit = highbit(ave);
- assert(bit > 0);
- sal_Size new_size = cache->m_hash_size << (bit - 1);
-
- cache->m_features |= RTL_CACHE_FEATURE_RESCALE;
- RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
- rtl_cache_hash_rescale (cache, new_size);
- RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
- cache->m_features &= ~RTL_CACHE_FEATURE_RESCALE;
- }
- }
- }
-
- return bufctl;
-}
-
-#define RTL_CACHE_SLAB(addr, size) \
- ((reinterpret_cast<rtl_cache_slab_type*>(RTL_MEMORY_P2END(reinterpret_cast<sal_uIntPtr>(addr), (size)))) - 1)
-
-int rtl_cache_slab_constructor(void * obj, SAL_UNUSED_PARAMETER void *)
-{
- rtl_cache_slab_type * slab = static_cast<rtl_cache_slab_type*>(obj);
-
- QUEUE_START_NAMED(slab, slab_);
- slab->m_ntypes = 0;
-
- return 1;
-}
-
-void rtl_cache_slab_destructor(void * obj, SAL_UNUSED_PARAMETER void *)
-{
- rtl_cache_slab_type * slab = static_cast< rtl_cache_slab_type * >(obj);
- assert(QUEUE_STARTED_NAMED(slab, slab_)); // assure removed from queue(s)
- assert(slab->m_ntypes == 0); // assure no longer referenced
- (void) slab; // avoid warnings
-}
-
-/**
- @precond cache->m_slab_lock released.
-*/
-void rtl_cache_slab_destroy(
- rtl_cache_type * cache,
- rtl_cache_slab_type * slab
-)
-{
- void * addr = reinterpret_cast<void*>(slab->m_data);
- sal_Size refcnt = slab->m_ntypes; slab->m_ntypes = 0;
-
- if (cache->m_features & RTL_CACHE_FEATURE_HASH)
- {
- /* cleanup bufctl(s) for free buffer(s) */
- sal_Size ntypes = (slab->m_bp - slab->m_data) / cache->m_type_size;
- for (ntypes -= refcnt; slab->m_sp; ntypes--)
- {
- rtl_cache_bufctl_type * bufctl = slab->m_sp;
-
- /* pop from freelist */
- slab->m_sp = bufctl->m_next;
- bufctl->m_next = nullptr;
-
- /* return bufctl struct to bufctl cache */
- rtl_cache_free (gp_cache_bufctl_cache, bufctl);
- }
- assert(ntypes == 0);
-
- /* return slab struct to slab cache */
- rtl_cache_free (gp_cache_slab_cache, slab);
- }
- else
- {
- /* destruct embedded slab struct */
- rtl_cache_slab_destructor (slab, nullptr);
- }
-
- if (refcnt == 0 || cache->m_features & RTL_CACHE_FEATURE_BULKDESTROY)
- {
- /* free memory */
- rtl_arena_free (cache->m_source, addr, cache->m_slab_size);
- }
-}
-
-/**
- Return a buffer to slab layer; used by magazine layer.
-*/
-void rtl_cache_slab_free(
- rtl_cache_type * cache,
- void * addr
-)
-{
- rtl_cache_bufctl_type * bufctl;
- rtl_cache_slab_type * slab;
-
- RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_slab_lock));
-
- /* determine slab from addr */
- if (cache->m_features & RTL_CACHE_FEATURE_HASH)
- {
- bufctl = rtl_cache_hash_remove (cache, reinterpret_cast<sal_uIntPtr>(addr));
- slab = (bufctl != nullptr) ? reinterpret_cast<rtl_cache_slab_type*>(bufctl->m_slab) : nullptr;
- }
- else
- {
- /* embedded slab struct */
- bufctl = static_cast<rtl_cache_bufctl_type*>(addr);
- slab = RTL_CACHE_SLAB(addr, cache->m_slab_size);
- }
-
- if (slab)
- {
- /* check for full slab */
- if (slab->m_ntypes == cache->m_ntypes)
- {
- /* remove from 'used' queue */
- QUEUE_REMOVE_NAMED(slab, slab_);
-
- /* insert onto 'free' queue (head) */
- QUEUE_INSERT_HEAD_NAMED(&(cache->m_free_head), slab, slab_);
- }
-
- /* push front */
- bufctl->m_next = slab->m_sp;
- slab->m_sp = bufctl;
-
- /* update stats */
- cache->m_slab_stats.m_free += 1;
- cache->m_slab_stats.m_mem_alloc -= cache->m_type_size;
-
- /* decrement usage, check for empty slab */
- if ((slab->m_ntypes -= 1) == 0)
- {
- /* remove from 'free' queue */
- QUEUE_REMOVE_NAMED(slab, slab_);
-
- /* update stats */
- cache->m_slab_stats.m_mem_total -= cache->m_slab_size;
-
- /* free 'empty' slab */
- RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
- rtl_cache_slab_destroy (cache, slab);
- return;
- }
- }
-
- RTL_MEMORY_LOCK_RELEASE(&(cache->m_slab_lock));
-}
-
-int rtl_cache_magazine_constructor(void * obj, SAL_UNUSED_PARAMETER void *)
-{
- rtl_cache_magazine_type * mag = static_cast<rtl_cache_magazine_type*>(obj);
- /* @@@ sal_Size size = (sal_Size)(arg); @@@ */
-
- mag->m_mag_next = nullptr;
- mag->m_mag_size = RTL_CACHE_MAGAZINE_SIZE;
- mag->m_mag_used = 0;
-
- return 1;
-}
-
-void rtl_cache_magazine_destructor(void * obj, SAL_UNUSED_PARAMETER void *)
-{
- rtl_cache_magazine_type * mag = static_cast< rtl_cache_magazine_type * >(
- obj);
- assert(!mag->m_mag_next); // assure removed from queue(s)
- assert(mag->m_mag_used == 0); // assure no longer referenced
- (void) mag; // avoid warnings
-}
-
-void rtl_cache_magazine_clear(
- rtl_cache_type * cache,
- rtl_cache_magazine_type * mag
-)
-{
- for (; mag->m_mag_used > 0; --mag->m_mag_used)
- {
- void * obj = mag->m_objects[mag->m_mag_used-1];
- mag->m_objects[mag->m_mag_used-1] = nullptr;
-
- if (cache->m_destructor)
- {
- /* destruct object */
- (cache->m_destructor)(obj, cache->m_userarg);
- }
-
- /* return buffer to slab layer */
- rtl_cache_slab_free (cache, obj);
- }
-}
-
-/**
- @precond cache->m_depot_lock acquired.
-*/
-inline rtl_cache_magazine_type * rtl_cache_depot_dequeue(
- rtl_cache_depot_type * depot
-)
-{
- rtl_cache_magazine_type * mag = nullptr;
- if (depot->m_mag_count > 0)
- {
- /* dequeue magazine */
- assert(depot->m_mag_next);
-
- mag = depot->m_mag_next;
- depot->m_mag_next = mag->m_mag_next;
- mag->m_mag_next = nullptr;
-
- /* update depot stats */
- depot->m_mag_count--;
- if(depot->m_curr_min > depot->m_mag_count)
- {
- depot->m_curr_min = depot->m_mag_count;
- }
- }
- return mag;
-}
-
-
-void rtl_cache_constructor(void * obj)
-{
- rtl_cache_type * cache = static_cast<rtl_cache_type*>(obj);
-
- memset (cache, 0, sizeof(rtl_cache_type));
-
- /* linkage */
- QUEUE_START_NAMED(cache, cache_);
-
- /* slab layer */
- RTL_MEMORY_LOCK_INIT(&(cache->m_slab_lock));
-
- QUEUE_START_NAMED(&(cache->m_free_head), slab_);
- QUEUE_START_NAMED(&(cache->m_used_head), slab_);
-
- cache->m_hash_table = cache->m_hash_table_0;
- cache->m_hash_size = RTL_CACHE_HASH_SIZE;
- cache->m_hash_shift = highbit(cache->m_hash_size) - 1;
-
- /* depot layer */
- RTL_MEMORY_LOCK_INIT(&(cache->m_depot_lock));
-}
-
-void rtl_cache_destructor(void * obj)
-{
- rtl_cache_type * cache = static_cast<rtl_cache_type*>(obj);
-
- /* linkage */
- assert(QUEUE_STARTED_NAMED(cache, cache_));
-
- /* slab layer */
- RTL_MEMORY_LOCK_DESTROY(&(cache->m_slab_lock));
-
- assert(QUEUE_STARTED_NAMED(&(cache->m_free_head), slab_));
- assert(QUEUE_STARTED_NAMED(&(cache->m_used_head), slab_));
-
- assert(cache->m_hash_table == cache->m_hash_table_0);
- assert(cache->m_hash_size == RTL_CACHE_HASH_SIZE);
- assert(cache->m_hash_shift == highbit(cache->m_hash_size) - 1);
-
- /* depot layer */
- RTL_MEMORY_LOCK_DESTROY(&(cache->m_depot_lock));
-}
-
rtl_cache_type * rtl_cache_activate(
rtl_cache_type * cache,
const char * name,
@@ -440,221 +43,38 @@ rtl_cache_type * rtl_cache_activate(
size_t objalign,
int (SAL_CALL * constructor)(void * obj, void * userarg),
void (SAL_CALL * destructor) (void * obj, void * userarg),
- void * userarg,
- rtl_arena_type * source,
- int flags
+ void * userarg
)
{
assert(cache);
- if (cache)
- {
- sal_Size slabsize;
-
- snprintf (cache->m_name, sizeof(cache->m_name), "%s", name);
-
- /* ensure minimum size (embedded bufctl linkage) */
- if(objsize < sizeof(rtl_cache_bufctl_type*))
- {
- objsize = sizeof(rtl_cache_bufctl_type*);
- }
-
- if (objalign == 0)
- {
- /* determine default alignment */
- if (objsize >= RTL_MEMORY_ALIGNMENT_8)
- objalign = RTL_MEMORY_ALIGNMENT_8;
- else
- objalign = RTL_MEMORY_ALIGNMENT_4;
- }
- else
- {
- /* ensure minimum alignment */
- if(objalign < RTL_MEMORY_ALIGNMENT_4)
- {
- objalign = RTL_MEMORY_ALIGNMENT_4;
- }
- }
- assert(RTL_MEMORY_ISP2(objalign));
-
- cache->m_type_size = objsize = RTL_MEMORY_P2ROUNDUP(objsize, objalign);
- cache->m_type_align = objalign;
- cache->m_type_shift = highbit(cache->m_type_size) - 1;
- cache->m_constructor = constructor;
- cache->m_destructor = destructor;
- cache->m_userarg = userarg;
+ snprintf (cache->m_name, sizeof(cache->m_name), "%s", name);
- /* slab layer */
- cache->m_source = source;
-
- slabsize = source->m_quantum; /* minimum slab size */
- /* waste at most 1/8 of slab */
- if(slabsize < cache->m_type_size * 8)
- {
- slabsize = cache->m_type_size * 8;
- }
-
- slabsize = RTL_MEMORY_P2ROUNDUP(slabsize, source->m_quantum);
- if (!RTL_MEMORY_ISP2(slabsize))
- slabsize = ((sal_Size(1)) << highbit(slabsize));
- cache->m_slab_size = slabsize;
-
- if (cache->m_slab_size > source->m_quantum)
- {
- assert(gp_cache_slab_cache);
- assert(gp_cache_bufctl_cache);
-
- cache->m_features |= RTL_CACHE_FEATURE_HASH;
- cache->m_ntypes = cache->m_slab_size / cache->m_type_size;
- cache->m_ncolor_max = cache->m_slab_size % cache->m_type_size;
- }
+ if (objalign == 0)
+ {
+ /* determine default alignment */
+ if (objsize >= RTL_MEMORY_ALIGNMENT_8)
+ objalign = RTL_MEMORY_ALIGNMENT_8;
else
- {
- /* embedded slab struct */
- cache->m_ntypes = (cache->m_slab_size - sizeof(rtl_cache_slab_type)) / cache->m_type_size;
- cache->m_ncolor_max = (cache->m_slab_size - sizeof(rtl_cache_slab_type)) % cache->m_type_size;
- }
-
- assert(cache->m_ntypes > 0);
- cache->m_ncolor = 0;
-
- if (flags & RTL_CACHE_FLAG_BULKDESTROY)
- {
- /* allow bulk slab delete upon cache deactivation */
- cache->m_features |= RTL_CACHE_FEATURE_BULKDESTROY;
- }
-
- /* magazine layer */
- if (!(flags & RTL_CACHE_FLAG_NOMAGAZINE))
- {
- assert(gp_cache_magazine_cache);
- cache->m_magazine_cache = gp_cache_magazine_cache;
- }
-
- /* insert into cache list */
- RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
- QUEUE_INSERT_TAIL_NAMED(&(g_cache_list.m_cache_head), cache, cache_);
- RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
+ objalign = RTL_MEMORY_ALIGNMENT_4;
}
- return cache;
-}
-
-void rtl_cache_deactivate(rtl_cache_type * cache)
-{
- /* remove from cache list */
- RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
- bool active = !QUEUE_STARTED_NAMED(cache, cache_);
- QUEUE_REMOVE_NAMED(cache, cache_);
- RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
-
- assert(active); // orphaned cache
- (void)active;
-
- /* cleanup magazine layer */
- if (cache->m_magazine_cache)
+ else
{
- rtl_cache_type * mag_cache;
- rtl_cache_magazine_type * mag;
-
- /* prevent recursion */
- mag_cache = cache->m_magazine_cache;
- cache->m_magazine_cache = nullptr;
-
- /* cleanup cpu layer */
- if ((mag = cache->m_cpu_curr))
- {
- // coverity[missing_lock] - locking is fine
- cache->m_cpu_curr = nullptr;
- rtl_cache_magazine_clear (cache, mag);
- rtl_cache_free (mag_cache, mag);
- }
-
- if ((mag = cache->m_cpu_prev))
- {
- // coverity[missing_lock] - locking is fine
- cache->m_cpu_prev = nullptr;
- rtl_cache_magazine_clear (cache, mag);
- rtl_cache_free (mag_cache, mag);
- }
-
- /* cleanup depot layer */
- while ((mag = rtl_cache_depot_dequeue(&(cache->m_depot_full))))
- {
- rtl_cache_magazine_clear (cache, mag);
- rtl_cache_free (mag_cache, mag);
- }
-
- while ((mag = rtl_cache_depot_dequeue(&(cache->m_depot_empty))))
+ /* ensure minimum alignment */
+ if(objalign < RTL_MEMORY_ALIGNMENT_4)
{
- rtl_cache_magazine_clear (cache, mag);
- rtl_cache_free (mag_cache, mag);
+ objalign = RTL_MEMORY_ALIGNMENT_4;
}
}
+ assert(RTL_MEMORY_ISP2(objalign));
- /* cleanup slab layer */
- if (cache->m_slab_stats.m_alloc > cache->m_slab_stats.m_free)
- {
- if (cache->m_features & RTL_CACHE_FEATURE_HASH)
- {
- /* cleanup bufctl(s) for leaking buffer(s) */
- sal_Size i, n = cache->m_hash_size;
- for (i = 0; i < n; i++)
- {
- rtl_cache_bufctl_type * bufctl;
- while ((bufctl = cache->m_hash_table[i]))
- {
- /* pop from hash table */
- cache->m_hash_table[i] = bufctl->m_next;
- bufctl->m_next = nullptr;
-
- /* return to bufctl cache */
- rtl_cache_free (gp_cache_bufctl_cache, bufctl);
- }
- }
- }
- {
- /* force cleanup of remaining slabs */
- rtl_cache_slab_type *head, *slab;
-
- head = &(cache->m_used_head);
- for (slab = head->m_slab_next; slab != head; slab = head->m_slab_next)
- {
- /* remove from 'used' queue */
- QUEUE_REMOVE_NAMED(slab, slab_);
-
- /* update stats */
- cache->m_slab_stats.m_mem_total -= cache->m_slab_size;
-
- /* free slab */
- rtl_cache_slab_destroy (cache, slab);
- }
+ cache->m_type_size = objsize = RTL_MEMORY_P2ROUNDUP(objsize, objalign);
- head = &(cache->m_free_head);
- for (slab = head->m_slab_next; slab != head; slab = head->m_slab_next)
- {
- /* remove from 'free' queue */
- QUEUE_REMOVE_NAMED(slab, slab_);
+ cache->m_constructor = constructor;
+ cache->m_destructor = destructor;
+ cache->m_userarg = userarg;
- /* update stats */
- cache->m_slab_stats.m_mem_total -= cache->m_slab_size;
-
- /* free slab */
- rtl_cache_slab_destroy (cache, slab);
- }
- }
- }
-
- if (cache->m_hash_table != cache->m_hash_table_0)
- {
- rtl_arena_free (
- gp_cache_arena,
- cache->m_hash_table,
- cache->m_hash_size * sizeof(rtl_cache_bufctl_type*));
-
- cache->m_hash_table = cache->m_hash_table_0;
- cache->m_hash_size = RTL_CACHE_HASH_SIZE;
- cache->m_hash_shift = highbit(cache->m_hash_size) - 1;
- }
+ return cache;
}
} //namespace
@@ -667,8 +87,8 @@ rtl_cache_type * SAL_CALL rtl_cache_create(
void (SAL_CALL * destructor) (void * obj, void * userarg),
void (SAL_CALL * /*reclaim*/) (void * userarg),
void * userarg,
- rtl_arena_type * source,
- int flags
+ rtl_arena_type *,
+ int
) SAL_THROW_EXTERN_C()
{
rtl_cache_type * result = nullptr;
@@ -679,14 +99,7 @@ try_alloc:
if (result)
{
rtl_cache_type * cache = result;
- rtl_cache_constructor (cache);
-
- if (!source)
- {
- /* use default arena */
- assert(gp_default_arena);
- source = gp_default_arena;
- }
+ memset (cache, 0, sizeof(rtl_cache_type));
result = rtl_cache_activate (
cache,
@@ -695,16 +108,12 @@ try_alloc:
objalign,
constructor,
destructor,
- userarg,
- source,
- flags
+ userarg
);
if (!result)
{
/* activation failed */
- rtl_cache_deactivate (cache);
- rtl_cache_destructor (cache);
rtl_arena_free (gp_cache_arena, cache, size);
}
}
@@ -724,8 +133,6 @@ void SAL_CALL rtl_cache_destroy(rtl_cache_type * cache) SAL_THROW_EXTERN_C()
{
if (cache)
{
- rtl_cache_deactivate (cache);
- rtl_cache_destructor (cache);
rtl_arena_free (gp_cache_arena, cache, sizeof(rtl_cache_type));
}
}
@@ -776,51 +183,6 @@ void SAL_CALL rtl_secureZeroMemory(void *Ptr, sal_Size Bytes) SAL_THROW_EXTERN_C
*p++ = 0;
}
-static void * rtl_cache_wsupdate_all(void * arg);
-
-static void rtl_cache_wsupdate_init()
-{
- RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
- g_cache_list.m_update_done = 0;
- (void) pthread_cond_init (&(g_cache_list.m_update_cond), nullptr);
- if (pthread_create (
- &(g_cache_list.m_update_thread), nullptr, rtl_cache_wsupdate_all, reinterpret_cast<void*>(10)) != 0)
- {
- /* failure */
- g_cache_list.m_update_thread = pthread_t();
- }
- RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
-}
-
-static void rtl_cache_wsupdate_wait(unsigned int seconds)
-{
- if (seconds > 0)
- {
- timeval now;
- timespec wakeup;
-
- gettimeofday(&now, nullptr);
- wakeup.tv_sec = now.tv_sec + seconds;
- wakeup.tv_nsec = now.tv_usec * 1000;
-
- (void) pthread_cond_timedwait (
- &(g_cache_list.m_update_cond),
- &(g_cache_list.m_lock),
- &wakeup);
- }
-}
-
-static void rtl_cache_wsupdate_fini()
-{
- RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
- g_cache_list.m_update_done = 1;
- pthread_cond_signal (&(g_cache_list.m_update_cond));
- RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
-
- if (g_cache_list.m_update_thread != pthread_t())
- pthread_join (g_cache_list.m_update_thread, nullptr);
-}
-
#elif defined(_WIN32)
void SAL_CALL rtl_secureZeroMemory(void *Ptr, sal_Size Bytes) SAL_THROW_EXTERN_C()
@@ -828,273 +190,34 @@ void SAL_CALL rtl_secureZeroMemory(void *Ptr, sal_Size Bytes) SAL_THROW_EXTERN_C
RtlSecureZeroMemory(Ptr, Bytes);
}
-static DWORD WINAPI rtl_cache_wsupdate_all(void * arg);
-
-static void rtl_cache_wsupdate_init()
-{
- DWORD dwThreadId;
-
- RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
- g_cache_list.m_update_done = 0;
- g_cache_list.m_update_cond = CreateEventW (nullptr, TRUE, FALSE, nullptr);
-
- g_cache_list.m_update_thread =
- CreateThread (nullptr, 0, rtl_cache_wsupdate_all, reinterpret_cast<LPVOID>(10), 0, &dwThreadId);
- RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
-}
-
-static void rtl_cache_wsupdate_wait(unsigned int seconds)
-{
- if (seconds > 0)
- {
- RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
- WaitForSingleObject (g_cache_list.m_update_cond, static_cast<DWORD>(seconds * 1000));
- RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
- }
-}
-
-static void rtl_cache_wsupdate_fini()
-{
- RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
- g_cache_list.m_update_done = 1;
- SetEvent (g_cache_list.m_update_cond);
- RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
-
- WaitForSingleObject (g_cache_list.m_update_thread, INFINITE);
-}
-
#endif /* SAL_UNX || _WIN32 */
-/**
- update depot stats and purge excess magazines.
-
- @precond cache->m_depot_lock acquired
-*/
-static void rtl_cache_depot_wsupdate(
- rtl_cache_type * cache,
- rtl_cache_depot_type * depot
-)
-{
- sal_Size npurge;
-
- depot->m_prev_min = depot->m_curr_min;
- depot->m_curr_min = depot->m_mag_count;
-
- npurge = std::min(depot->m_curr_min, depot->m_prev_min);
- for (; npurge > 0; npurge--)
- {
- rtl_cache_magazine_type * mag = rtl_cache_depot_dequeue (depot);
- if (mag)
- {
- RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
- rtl_cache_magazine_clear (cache, mag);
- rtl_cache_free (cache->m_magazine_cache, mag);
- RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
- }
- }
- // coverity[missing_unlock] - locking is fine
-}
-
-/**
- @precond cache->m_depot_lock released
-*/
-static void rtl_cache_wsupdate(rtl_cache_type * cache)
-{
- if (cache->m_magazine_cache)
- {
- RTL_MEMORY_LOCK_ACQUIRE(&(cache->m_depot_lock));
-
- rtl_cache_depot_wsupdate (cache, &(cache->m_depot_full));
- rtl_cache_depot_wsupdate (cache, &(cache->m_depot_empty));
-
- RTL_MEMORY_LOCK_RELEASE(&(cache->m_depot_lock));
- }
-}
-
-#if defined(SAL_UNX)
-static void *
-#elif defined(_WIN32)
-static DWORD WINAPI
-#endif /* SAL_UNX || _WIN32 */
-rtl_cache_wsupdate_all(void * arg)
-{
- osl::Thread::setName("rtl_cache_wsupdate_all");
- unsigned int seconds = sal::static_int_cast< unsigned int >(
- reinterpret_cast< sal_uIntPtr >(arg));
-
- RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
- while (!g_cache_list.m_update_done)
- {
- rtl_cache_wsupdate_wait (seconds);
- if (!g_cache_list.m_update_done)
- {
- rtl_cache_type * head, * cache;
-
- head = &(g_cache_list.m_cache_head);
- for (cache = head->m_cache_next;
- cache != head;
- cache = cache->m_cache_next)
- {
- rtl_cache_wsupdate (cache);
- }
- }
- }
- RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
-
-#if defined(SAL_UNX)
- return nullptr;
-#elif defined(_WIN32)
- return 0;
-#endif
-}
-
-void rtl_cache_stop_threads(void)
-{
- rtl_cache_wsupdate_fini();
-}
-
-void rtl_cache_start_threads(void)
-{
- rtl_cache_wsupdate_init();
-}
-
void rtl_cache_init()
{
- {
- /* list of caches */
- RTL_MEMORY_LOCK_INIT(&(g_cache_list.m_lock));
- rtl_cache_constructor (&(g_cache_list.m_cache_head));
- }
- {
- /* cache: internal arena */
- assert(!gp_cache_arena);
-
- gp_cache_arena = rtl_arena_create (
- "rtl_cache_internal_arena",
- 64, /* quantum */
- 0, /* no quantum caching */
- nullptr, /* default source */
- rtl_arena_alloc,
- rtl_arena_free,
- 0 /* flags */
- );
- assert(gp_cache_arena);
-
- /* check 'gp_default_arena' initialization */
- assert(gp_default_arena);
- }
- {
- /* cache: magazine cache */
- static rtl_cache_type g_cache_magazine_cache;
-
- assert(!gp_cache_magazine_cache);
- rtl_cache_constructor (&g_cache_magazine_cache);
-
- gp_cache_magazine_cache = rtl_cache_activate (
- &g_cache_magazine_cache,
- "rtl_cache_magazine_cache",
- sizeof(rtl_cache_magazine_type), /* objsize */
- 0, /* objalign */
- rtl_cache_magazine_constructor,
- rtl_cache_magazine_destructor,
- nullptr, /* userarg: NYI */
- gp_default_arena, /* source */
- RTL_CACHE_FLAG_NOMAGAZINE /* during bootstrap; activated below */
- );
- assert(gp_cache_magazine_cache);
-
- /* activate magazine layer */
- g_cache_magazine_cache.m_magazine_cache = gp_cache_magazine_cache;
- }
- {
- /* cache: slab (struct) cache */
- static rtl_cache_type g_cache_slab_cache;
-
- assert(!gp_cache_slab_cache);
- rtl_cache_constructor (&g_cache_slab_cache);
-
- gp_cache_slab_cache = rtl_cache_activate (
- &g_cache_slab_cache,
- "rtl_cache_slab_cache",
- sizeof(rtl_cache_slab_type), /* objsize */
- 0, /* objalign */
- rtl_cache_slab_constructor,
- rtl_cache_slab_destructor,
- nullptr, /* userarg: none */
- gp_default_arena, /* source */
- 0 /* flags: none */
- );
- assert(gp_cache_slab_cache);
- }
- {
- /* cache: bufctl cache */
- static rtl_cache_type g_cache_bufctl_cache;
-
- assert(!gp_cache_bufctl_cache);
- rtl_cache_constructor (&g_cache_bufctl_cache);
+ /* cache: internal arena */
+ assert(!gp_cache_arena);
- gp_cache_bufctl_cache = rtl_cache_activate (
- &g_cache_bufctl_cache,
- "rtl_cache_bufctl_cache",
- sizeof(rtl_cache_bufctl_type), /* objsize */
- 0, /* objalign */
- nullptr, /* constructor */
- nullptr, /* destructor */
- nullptr, /* userarg */
- gp_default_arena, /* source */
- 0 /* flags: none */
- );
- assert(gp_cache_bufctl_cache);
- }
+ gp_cache_arena = rtl_arena_create (
+ "rtl_cache_internal_arena",
+ 64, /* quantum */
+ 0, /* no quantum caching */
+ nullptr, /* default source */
+ rtl_arena_alloc,
+ rtl_arena_free,
+ 0 /* flags */
+ );
+ assert(gp_cache_arena);
- rtl_cache_wsupdate_init();
+ /* check 'gp_default_arena' initialization */
+ assert(gp_default_arena);
}
void rtl_cache_fini()
{
if (gp_cache_arena)
{
- rtl_cache_type * cache, * head;
-
- rtl_cache_wsupdate_fini();
-
- if (gp_cache_bufctl_cache)
- {
- cache = gp_cache_bufctl_cache;
- gp_cache_bufctl_cache = nullptr;
- rtl_cache_deactivate (cache);
- rtl_cache_destructor (cache);
- }
-
- if (gp_cache_slab_cache)
- {
- cache = gp_cache_slab_cache;
- gp_cache_slab_cache = nullptr;
- rtl_cache_deactivate (cache);
- rtl_cache_destructor (cache);
- }
-
- if (gp_cache_magazine_cache)
- {
- cache = gp_cache_magazine_cache;
- gp_cache_magazine_cache = nullptr;
- rtl_cache_deactivate (cache);
- rtl_cache_destructor (cache);
- }
-
- if (gp_cache_arena)
- {
- rtl_arena_destroy (gp_cache_arena);
- gp_cache_arena = nullptr;
- }
-
- RTL_MEMORY_LOCK_ACQUIRE(&(g_cache_list.m_lock));
- head = &(g_cache_list.m_cache_head);
- for (cache = head->m_cache_next; cache != head; cache = cache->m_cache_next)
- {
- // noop
- }
- RTL_MEMORY_LOCK_RELEASE(&(g_cache_list.m_lock));
+ rtl_arena_destroy (gp_cache_arena);
+ gp_cache_arena = nullptr;
}
}
diff --git a/sal/rtl/alloc_cache.hxx b/sal/rtl/alloc_cache.hxx
index 3e467ade3cd2..501d5770b3c6 100644
--- a/sal/rtl/alloc_cache.hxx
+++ b/sal/rtl/alloc_cache.hxx
@@ -22,133 +22,17 @@
#include <sal/types.h>
#include <rtl/alloc.h>
-#include "alloc_impl.hxx"
-
-/**
- @internal
-*/
-struct rtl_cache_stat_type
-{
- sal_uInt64 m_alloc;
- sal_uInt64 m_free;
-
- sal_Size m_mem_total;
- sal_Size m_mem_alloc;
-};
-
-/**
- @internal
-*/
-struct rtl_cache_bufctl_type
-{
- rtl_cache_bufctl_type * m_next; /* linkage */
-
- sal_uIntPtr m_addr; /* buffer address */
- sal_uIntPtr m_slab; /* parent slab address */
-};
-
-/**
- @internal
-*/
-struct rtl_cache_slab_type
-{
- rtl_cache_slab_type * m_slab_next; /* slab linkage */
- rtl_cache_slab_type * m_slab_prev; /* slab linkage */
-
- sal_Size m_ntypes; /* number of buffers used */
- sal_uIntPtr m_data; /* buffer start addr */
-
- sal_uIntPtr m_bp; /* free buffer linkage 'base pointer' */
- rtl_cache_bufctl_type * m_sp; /* free buffer linkage 'stack pointer' */
-};
-
-/**
- @internal
-*/
-#define RTL_CACHE_MAGAZINE_SIZE 61
-
-struct rtl_cache_magazine_type
-{
- rtl_cache_magazine_type * m_mag_next; /* depot linkage */
-
- sal_Size m_mag_size;
- sal_Size m_mag_used;
-
- void * m_objects[RTL_CACHE_MAGAZINE_SIZE];
-};
-
-/**
- @internal
-*/
-struct rtl_cache_depot_type
-{
- /* magazine list */
- rtl_cache_magazine_type * m_mag_next; /* linkage */
- sal_Size m_mag_count; /* count */
-
- /* working set parameters */
- sal_Size m_curr_min;
- sal_Size m_prev_min;
-};
-
-/**
- @internal
-*/
-#define RTL_CACHE_HASH_SIZE 8
-
-#define RTL_CACHE_FEATURE_HASH 1
-#define RTL_CACHE_FEATURE_BULKDESTROY 2
-#define RTL_CACHE_FEATURE_RESCALE 4 /* within hash rescale operation */
struct rtl_cache_st
{
- /* linkage */
- rtl_cache_type * m_cache_next;
- rtl_cache_type * m_cache_prev;
-
/* properties */
char m_name[RTL_CACHE_NAME_LENGTH + 1];
- long m_features;
sal_Size m_type_size; /* const */
- sal_Size m_type_align; /* const */
- sal_Size m_type_shift; /* log2(m_type_size); const */
int (SAL_CALL * m_constructor)(void * obj, void * userarg); /* const */
void (SAL_CALL * m_destructor) (void * obj, void * userarg); /* const */
void * m_userarg;
-
- /* slab layer */
- rtl_memory_lock_type m_slab_lock;
- rtl_cache_stat_type m_slab_stats;
-
- rtl_arena_type * m_source; /* slab supplier; const */
- sal_Size m_slab_size; /* const */
- sal_Size m_ntypes; /* number of buffers per slab; const */
- sal_Size m_ncolor; /* next slab color */
- sal_Size m_ncolor_max; /* max. slab color */
-
- rtl_cache_slab_type m_free_head;
- rtl_cache_slab_type m_used_head;
-
- rtl_cache_bufctl_type ** m_hash_table;
- rtl_cache_bufctl_type * m_hash_table_0[RTL_CACHE_HASH_SIZE];
- sal_Size m_hash_size; /* m_hash_mask + 1 */
- sal_Size m_hash_shift; /* log2(m_hash_size) */
-
- /* depot layer */
- rtl_memory_lock_type m_depot_lock;
-
- rtl_cache_depot_type m_depot_empty;
- rtl_cache_depot_type m_depot_full;
-
- rtl_cache_type * m_magazine_cache; /* magazine supplier; const */
-
- /* cpu layer */
- rtl_cache_magazine_type * m_cpu_curr;
- rtl_cache_magazine_type * m_cpu_prev;
-
- rtl_cache_stat_type m_cpu_stats;
};
#endif // INCLUDED_SAL_RTL_ALLOC_CACHE_HXX
diff --git a/sal/rtl/strimp.cxx b/sal/rtl/strimp.cxx
index c590fbcc4d52..e356a4e921a6 100644
--- a/sal/rtl/strimp.cxx
+++ b/sal/rtl/strimp.cxx
@@ -118,19 +118,11 @@ void SAL_CALL rtl_alloc_preInit (rtl_alloc_preInit_phase_t phase) SAL_THROW_EXTE
rtl_allocateString = rtl_allocateMemory;
rtl_freeString = rtl_freeMemory;
- // Stop the rtl cache thread to have no extra threads while forking.
- rtl_cache_stop_threads();
-
// TODO: also re-initialize main allocator as well.
}
break;
- case rtlAllocPostInit:
- {
- // We have forked and need to restart threads and anything
- // that must start after forking.
- rtl_cache_start_threads();
- }
+ case rtlAllocPostInit: // no longer used
break;
}
}
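
[Editor's note] The public rtl_cache C API itself is not touched by this commit; only the backing implementation shrinks. The sketch below shows how a client of that API looks, based on the callback and rtl_cache_create/rtl_cache_free/rtl_cache_destroy signatures visible in the diff above; the exact rtl_cache_alloc prototype is assumed from rtl/alloc.h, and the Widget type with its widget_init/widget_fini callbacks is invented for illustration.

    // Hedged usage sketch of the surviving rtl_cache API.
    #include <rtl/alloc.h>
    #include <sal/types.h>
    #include <new>

    struct Widget { int m_value = 0; };

    int SAL_CALL widget_init(void* obj, void* /*userarg*/)
    {
        new (obj) Widget();   // construct in the cache-provided buffer
        return 1;             // non-zero: construction succeeded
    }

    void SAL_CALL widget_fini(void* obj, void* /*userarg*/)
    {
        static_cast<Widget*>(obj)->~Widget();
    }

    void example()
    {
        rtl_cache_type* cache = rtl_cache_create(
            "example_widget_cache",
            sizeof(Widget),   // objsize
            0,                // objalign: 0 selects a default alignment
            widget_init,      // constructor callback
            widget_fini,      // destructor callback
            nullptr,          // reclaim callback (unused)
            nullptr,          // userarg
            nullptr,          // source arena (ignored after this commit)
            0);               // flags (ignored after this commit)
        if (!cache)
            return;

        if (void* p = rtl_cache_alloc(cache))
        {
            Widget* w = static_cast<Widget*>(p);
            w->m_value = 42;
            rtl_cache_free(cache, w);
        }
        rtl_cache_destroy(cache);
    }
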