/* Cache memory handling.
   Copyright (C) 2004-2021 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published
   by the Free Software Foundation; version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <https://www.gnu.org/licenses/>.  */

#include <assert.h>
#include <errno.h>
#include <error.h>
#include <fcntl.h>
#include <inttypes.h>
#include <libintl.h>
#include <limits.h>
#include <obstack.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/param.h>

#include "dbg_log.h"
#include "nscd.h"

/* Sort hash entries by their address inside the mapping.  */
static int
sort_he (const void *p1, const void *p2)
{
  struct hashentry *h1 = *(struct hashentry **) p1;
  struct hashentry *h2 = *(struct hashentry **) p2;

  if (h1 < h2)
    return -1;
  if (h1 > h2)
    return 1;
  return 0;
}

/* Sort hash entries by the address of the data they reference.  */
static int
sort_he_data (const void *p1, const void *p2)
{
  struct hashentry *h1 = *(struct hashentry **) p1;
  struct hashentry *h2 = *(struct hashentry **) p2;

  if (h1->packet < h2->packet)
    return -1;
  if (h1->packet > h2->packet)
    return 1;
  return 0;
}

/* Basic definitions for the bitmap implementation.  Only BITMAP_T
   needs to be changed to choose a different word size.  */
#define BITMAP_T uint8_t
#define BITS (CHAR_BIT * sizeof (BITMAP_T))
#define ALLBITS ((((BITMAP_T) 1) << BITS) - 1)
#define HIGHBIT (((BITMAP_T) 1) << (BITS - 1))
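
/* With the default BITMAP_T of uint8_t these evaluate to BITS == 8,
   ALLBITS == 0xff, and HIGHBIT == 0x80.  ALLBITS relies on integer
   promotion: the shift happens in int, so ((BITMAP_T) 1 << 8) - 1
   yields 0xff instead of overflowing the 8-bit type.  */
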
static void
markrange (BITMAP_T *mark, ref_t start, size_t len)
{
  /* Adjust parameters for block alignment.  */
  assert ((start & BLOCK_ALIGN_M1) == 0);
  start /= BLOCK_ALIGN;
  len = (len + BLOCK_ALIGN_M1) / BLOCK_ALIGN;

  size_t elem = start / BITS;

  if (start % BITS != 0)
    {
      if (start % BITS + len <= BITS)
        {
          /* All fits in the partial byte.  */
          mark[elem] |= (ALLBITS >> (BITS - len)) << (start % BITS);
          return;
        }

      mark[elem++] |= ALLBITS << (start % BITS);
      len -= BITS - (start % BITS);
    }

  while (len >= BITS)
    {
      mark[elem++] = ALLBITS;
      len -= BITS;
    }

  if (len > 0)
    mark[elem] |= ALLBITS >> (BITS - len);
}
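
/* A worked example, assuming a hypothetical BLOCK_ALIGN of 8 (so
   BLOCK_ALIGN_M1 == 7) and the uint8_t bitmap above: the call
   markrange (mark, 0, 20) first converts the byte range to blocks,
   start becoming block 0 and len becoming (20 + 7) / 8 == 3 blocks.
   Here start % BITS == 0 and len < BITS, so only the final statement
   runs: mark[0] |= ALLBITS >> (BITS - 3), i.e. mark[0] |= 0x07,
   marking blocks 0, 1, and 2 as in use.  */
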
void
gc (struct database_dyn *db)
{
  /* We need write access.  */
  pthread_rwlock_wrlock (&db->lock);

  /* And the memory handling lock.  */
  pthread_mutex_lock (&db->memlock);

  /* We need an array representing the data area.  All memory
     allocation is BLOCK_ALIGN aligned so this is the level at which
     we have to look at the memory.  We use a mark and sweep algorithm
     where the marks are placed in this array.  */
  assert (db->head->first_free % BLOCK_ALIGN == 0);
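
  /* In outline, the collector below works in five steps: (1) walk
     every hash chain and mark the blocks used by the hash entries
     and, for the one entry flagged FIRST, by the referenced datahead
     record; (2) sort pointers to the entries both by entry address
     and by data address; (3) scan the mark bitmap for the first gap;
     (4) for each used region above the gap, record a pending move and
     adjust all references by the displacement; (5) carry out the
     moves with memmove and update first_free.  */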

  BITMAP_T *mark;
  bool mark_use_malloc;
  /* In prune_cache we are also using a dynamically allocated array.
     If the array in the caller is too large we have malloc'ed it.  */
  size_t stack_used = sizeof (bool) * db->head->module;
  if (__glibc_unlikely (stack_used > MAX_STACK_USE))
    stack_used = 0;
  size_t nmark = (db->head->first_free / BLOCK_ALIGN + BITS - 1) / BITS;
  size_t memory_needed = nmark * sizeof (BITMAP_T);
  if (__glibc_likely (stack_used + memory_needed <= MAX_STACK_USE))
    {
      mark = (BITMAP_T *) alloca_account (memory_needed, stack_used);
      mark_use_malloc = false;
      memset (mark, '\0', memory_needed);
    }
  else
    {
      mark = (BITMAP_T *) xcalloc (1, memory_needed);
      mark_use_malloc = true;
    }

  /* Create an array which can hold pointers to all the entries in
     the hash table.  */
  memory_needed = 2 * db->head->nentries * sizeof (struct hashentry *);
  struct hashentry **he;
  struct hashentry **he_data;
  bool he_use_malloc;
  if (__glibc_likely (stack_used + memory_needed <= MAX_STACK_USE))
    {
      he = alloca_account (memory_needed, stack_used);
      he_use_malloc = false;
    }
  else
    {
      he = xmalloc (memory_needed);
      he_use_malloc = true;
    }
  he_data = &he[db->head->nentries];

  size_t cnt = 0;
  for (size_t idx = 0; idx < db->head->module; ++idx)
    {
      ref_t *prevp = &db->head->array[idx];
      ref_t run = *prevp;

      while (run != ENDREF)
        {
          assert (cnt < db->head->nentries);
          he[cnt] = (struct hashentry *) (db->data + run);

          he[cnt]->prevp = prevp;
          prevp = &he[cnt]->next;

          /* This is the hash entry itself.  */
          markrange (mark, run, sizeof (struct hashentry));

          /* Add the information for the data itself.  We do this
             only for the one special entry marked with FIRST.  */
          if (he[cnt]->first)
            {
              struct datahead *dh
                = (struct datahead *) (db->data + he[cnt]->packet);
              markrange (mark, he[cnt]->packet, dh->allocsize);
            }

          run = he[cnt]->next;

          ++cnt;
        }
    }
  assert (cnt == db->head->nentries);

  /* Sort the entries by the addresses of the referenced data.  All
     the entries pointing to the same DATAHEAD object will have the
     same key.  Stability of the sorting is unimportant.  */
  memcpy (he_data, he, cnt * sizeof (struct hashentry *));
  qsort (he_data, cnt, sizeof (struct hashentry *), sort_he_data);

  /* Sort the entries by their address.  */
  qsort (he, cnt, sizeof (struct hashentry *), sort_he);
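
  /* Two sorted views are used below: HE, ordered by the address of
     the hash entry itself, drives the fix-up of the chain links,
     while HE_DATA, ordered by the address of the referenced packet,
     drives the adjustment of the packet and key offsets.  */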

#define obstack_chunk_alloc xmalloc
#define obstack_chunk_free free
  struct obstack ob;
  obstack_init (&ob);

  /* Determine the highest used address.  */
  size_t high = nmark;
  while (high > 0 && mark[high - 1] == 0)
    --high;

  /* No memory used.  */
  if (high == 0)
    {
      db->head->first_free = 0;
      goto out;
    }

  /* Determine the highest offset.  */
  BITMAP_T mask = HIGHBIT;
  ref_t highref = (high * BITS - 1) * BLOCK_ALIGN;
  while ((mark[high - 1] & mask) == 0)
    {
      mask >>= 1;
      highref -= BLOCK_ALIGN;
    }

  /* Now we can iterate over the MARK array and find bits which are not
     set.  These represent memory which can be recovered.  */
  size_t byte = 0;
  /* Find the first gap.  */
  while (byte < high && mark[byte] == ALLBITS)
    ++byte;

  if (byte == high
      || (byte == high - 1 && (mark[byte] & ~(mask | (mask - 1))) == 0))
    /* No gap.  */
    goto out;

  mask = 1;
  cnt = 0;
  while ((mark[byte] & mask) != 0)
    {
      ++cnt;
      mask <<= 1;
    }
  ref_t off_free = (byte * BITS + cnt) * BLOCK_ALIGN;
  assert (off_free <= db->head->first_free);

  struct hashentry **next_hash = he;
  struct hashentry **next_data = he_data;

  /* Skip over the hash entries in the first block which does not get
     moved.  */
  while (next_hash < &he[db->head->nentries]
         && *next_hash < (struct hashentry *) (db->data + off_free))
    ++next_hash;

  while (next_data < &he_data[db->head->nentries]
         && (*next_data)->packet < off_free)
    ++next_data;

  /* Now we start modifying the data.  Make sure all readers of the
     data are aware of this and temporarily don't use the data.  */
  atomic_fetch_add_relaxed (&db->head->gc_cycle, 1);
  assert ((db->head->gc_cycle & 1) == 1);

  /* We do not perform the move operations right away since the
     he_data array is not sorted by the address of the data.  */
  struct moveinfo
  {
    void *from;
    void *to;
    size_t size;
    struct moveinfo *next;
  } *moves = NULL;

  while (byte < high)
    {
      /* Search for the next filled block.  BYTE is the index of the
         entry in MARK, MASK is the bit, and CNT is the bit number.
         OFF_ALLOC is the corresponding offset.  */
      if ((mark[byte] & ~(mask - 1)) == 0)
        {
          /* No other bit set in the same element of MARK.  Search in
             the following memory.  */
          do
            ++byte;
          while (byte < high && mark[byte] == 0);

          if (byte == high)
            /* That was it.  */
            break;

          mask = 1;
          cnt = 0;
        }
      /* Find the exact bit.  */
      while ((mark[byte] & mask) == 0)
        {
          ++cnt;
          mask <<= 1;
        }

      ref_t off_alloc = (byte * BITS + cnt) * BLOCK_ALIGN;
      assert (off_alloc <= db->head->first_free);

      /* Find the end of the used area.  */
      if ((mark[byte] & ~(mask - 1)) == (BITMAP_T) ~(mask - 1))
        {
          /* All other bits set.  Search the next bytes in MARK.  */
          do
            ++byte;
          while (byte < high && mark[byte] == ALLBITS);

          mask = 1;
          cnt = 0;
        }
      if (byte < high)
        {
          /* Find the exact bit.  */
          while ((mark[byte] & mask) != 0)
            {
              ++cnt;
              mask <<= 1;
            }
        }

      ref_t off_allocend = (byte * BITS + cnt) * BLOCK_ALIGN;
      assert (off_allocend <= db->head->first_free);
      /* Now we know that we can copy the area from OFF_ALLOC to
         OFF_ALLOCEND (not included) to the memory starting at
         OFF_FREE.  First fix up all the entries for the
         displacement.  */
      ref_t disp = off_alloc - off_free;

      struct moveinfo *new_move;
      if (__builtin_expect (stack_used + sizeof (*new_move) <= MAX_STACK_USE,
                            1))
        new_move = alloca_account (sizeof (*new_move), stack_used);
      else
        new_move = obstack_alloc (&ob, sizeof (*new_move));
      new_move->from = db->data + off_alloc;
      new_move->to = db->data + off_free;
      new_move->size = off_allocend - off_alloc;
      /* Create a circular list to be always able to append at the end.  */
      if (moves == NULL)
        moves = new_move->next = new_move;
      else
        {
          new_move->next = moves->next;
          moves = moves->next = new_move;
        }
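
      /* MOVES always points at the tail of the circular list and
         MOVES->next at its head, so appending is constant time and
         the later walk starting at MOVES->next visits the moves in
         the order they were recorded.  */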

      /* The following loop will prepare to move this much data.  */
      off_free += off_allocend - off_alloc;

      while (off_alloc < off_allocend)
        {
          /* Determine whether the next entry is for a hash entry or
             the data.  */
          if ((struct hashentry *) (db->data + off_alloc) == *next_hash)
            {
              /* Just correct the forward reference.  */
              *(*next_hash++)->prevp -= disp;

              off_alloc += ((sizeof (struct hashentry) + BLOCK_ALIGN_M1)
                            & ~BLOCK_ALIGN_M1);
            }
          else
            {
              assert (next_data < &he_data[db->head->nentries]);
              assert ((*next_data)->packet == off_alloc);

              struct datahead *dh = (struct datahead *) (db->data + off_alloc);
              do
                {
                  assert ((*next_data)->key >= (*next_data)->packet);
                  assert ((*next_data)->key + (*next_data)->len
                          <= (*next_data)->packet + dh->allocsize);

                  (*next_data)->packet -= disp;
                  (*next_data)->key -= disp;
                  ++next_data;
                }
              while (next_data < &he_data[db->head->nentries]
                     && (*next_data)->packet == off_alloc);

              off_alloc += (dh->allocsize + BLOCK_ALIGN_M1) & ~BLOCK_ALIGN_M1;
            }
        }
      assert (off_alloc == off_allocend);

      assert (off_alloc <= db->head->first_free);
      if (off_alloc == db->head->first_free)
        /* We are done, that was the last block.  */
        break;
    }
  assert (next_hash == &he[db->head->nentries]);
  assert (next_data == &he_data[db->head->nentries]);

  /* Now perform the actual moves.  */
  if (moves != NULL)
    {
      struct moveinfo *runp = moves->next;
      do
        {
          assert ((char *) runp->to >= db->data);
          assert ((char *) runp->to + runp->size
                  <= db->data + db->head->first_free);
          assert ((char *) runp->from >= db->data);
          assert ((char *) runp->from + runp->size
                  <= db->data + db->head->first_free);

          /* The regions may overlap.  */
          memmove (runp->to, runp->from, runp->size);
          runp = runp->next;
        }
      while (runp != moves->next);

      if (__glibc_unlikely (debug_level >= 3))
        dbg_log (_("freed %zu bytes in %s cache"),
                 (size_t) (db->head->first_free
                           - ((char *) moves->to + moves->size - db->data)),
                 dbnames[db - dbs]);

      /* The byte past the end of the last copied block is the next
         available byte.  */
      db->head->first_free = (char *) moves->to + moves->size - db->data;

      /* Consistency check.  */
      if (__glibc_unlikely (debug_level >= 3))
        {
          for (size_t idx = 0; idx < db->head->module; ++idx)
            {
              ref_t run = db->head->array[idx];
              size_t cnt = 0;

              while (run != ENDREF)
                {
                  if (run + sizeof (struct hashentry) > db->head->first_free)
                    {
                      dbg_log ("entry %zu in hash bucket %zu out of bounds: "
                               "%" PRIu32 "+%zu > %zu\n",
                               cnt, idx, run, sizeof (struct hashentry),
                               (size_t) db->head->first_free);
                      break;
                    }

                  struct hashentry *he = (struct hashentry *) (db->data + run);

                  if (he->key + he->len > db->head->first_free)
                    dbg_log ("key of entry %zu in hash bucket %zu out of "
                             "bounds: %" PRIu32 "+%zu > %zu\n",
                             cnt, idx, he->key, (size_t) he->len,
                             (size_t) db->head->first_free);

                  if (he->packet + sizeof (struct datahead)
                      > db->head->first_free)
                    dbg_log ("packet of entry %zu in hash bucket %zu out of "
                             "bounds: %" PRIu32 "+%zu > %zu\n",
                             cnt, idx, he->packet, sizeof (struct datahead),
                             (size_t) db->head->first_free);
                  else
                    {
                      struct datahead *dh = (struct datahead *) (db->data
                                                                 + he->packet);
                      if (he->packet + dh->allocsize
                          > db->head->first_free)
                        dbg_log ("full key of entry %zu in hash bucket %zu "
                                 "out of bounds: %" PRIu32 "+%zu > %zu",
                                 cnt, idx, he->packet, (size_t) dh->allocsize,
                                 (size_t) db->head->first_free);
                    }

                  run = he->next;
                  ++cnt;
                }
            }
        }
    }

  /* Make sure the data on disk is updated.  */
  if (db->persistent)
    msync (db->head, db->data + db->head->first_free - (char *) db->head,
           MS_ASYNC);


  /* Now we are done modifying the data.  */
  atomic_fetch_add_relaxed (&db->head->gc_cycle, 1);
  assert ((db->head->gc_cycle & 1) == 0);

  /* We are done.  */
 out:
  pthread_mutex_unlock (&db->memlock);
  pthread_rwlock_unlock (&db->lock);

  if (he_use_malloc)
    free (he);
  if (mark_use_malloc)
    free (mark);

  obstack_free (&ob, NULL);
}


void *
mempool_alloc (struct database_dyn *db, size_t len, int data_alloc)
{
  /* Make sure LEN is a multiple of our maximum alignment so we can
     keep track of used memory in multiples of this alignment value.  */
  if ((len & BLOCK_ALIGN_M1) != 0)
    len += BLOCK_ALIGN - (len & BLOCK_ALIGN_M1);
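
  /* For example, with a hypothetical BLOCK_ALIGN of 8 (BLOCK_ALIGN_M1
     == 7), a request for 20 bytes is padded here to 24: 20 & 7 is 4,
     so 8 - 4 == 4 bytes are added.  */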

  if (data_alloc)
    pthread_rwlock_rdlock (&db->lock);

  pthread_mutex_lock (&db->memlock);

  assert ((db->head->first_free & BLOCK_ALIGN_M1) == 0);

  bool tried_resize = false;
  void *res;
 retry:
  res = db->data + db->head->first_free;

  if (__glibc_unlikely (db->head->first_free + len > db->head->data_size))
    {
      if (! tried_resize)
        {
          /* Try to resize the database.  Grow the size by 1/8th.  */
          size_t oldtotal = (sizeof (struct database_pers_head)
                             + roundup (db->head->module * sizeof (ref_t),
                                        ALIGN)
                             + db->head->data_size);
          size_t new_data_size = (db->head->data_size
                                  + MAX (2 * len, db->head->data_size / 8));
          size_t newtotal = (sizeof (struct database_pers_head)
                             + roundup (db->head->module * sizeof (ref_t),
                                        ALIGN)
                             + new_data_size);
          if (newtotal > db->max_db_size)
            {
              new_data_size -= newtotal - db->max_db_size;
              newtotal = db->max_db_size;
            }

          if (db->mmap_used && newtotal > oldtotal
              /* We only have to adjust the file size.  The new pages
                 become magically available.  */
              && TEMP_FAILURE_RETRY_VAL (posix_fallocate (db->wr_fd, oldtotal,
                                                          newtotal
                                                          - oldtotal)) == 0)
            {
              db->head->data_size = new_data_size;
              tried_resize = true;
              goto retry;
            }
        }
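
      /* As an illustration (the numbers are hypothetical): with a
         data area of 1 MiB and an aligned request of 64 bytes the
         area would grow by MAX (128, 131072) bytes, i.e. 128 KiB.
         The growth is capped so the total never exceeds max_db_size,
         and the resize is attempted at most once per call
         (tried_resize).  */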

      if (data_alloc)
        pthread_rwlock_unlock (&db->lock);

      if (! db->last_alloc_failed)
        {
          dbg_log (_("no more memory for database '%s'"), dbnames[db - dbs]);

          db->last_alloc_failed = true;
        }

      ++db->head->addfailed;

      /* No luck.  */
      res = NULL;
    }
  else
    {
      db->head->first_free += len;

      db->last_alloc_failed = false;
    }

  pthread_mutex_unlock (&db->memlock);

  return res;
}
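
/* A minimal usage sketch (illustrative; DB and KEYLEN stand for
   caller-side variables):

     struct datahead *dh = mempool_alloc (db,
                                          sizeof (struct datahead) + keylen,
                                          1);
     if (dh == NULL)
       ... handle the out-of-memory case ...

   Passing a nonzero DATA_ALLOC makes the function take DB->lock for
   reading.  On success the read lock is left held so the caller can
   link the new record in before releasing it; on failure the lock is
   released here and NULL is returned.  */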