mirror of
https://sourceware.org/git/glibc.git
synced 2024-12-22 02:40:08 +00:00
af69217fea
1998-01-26 00:57 Ulrich Drepper <drepper@cygnus.com> * db2/Makefile (routines): Add os_alloc. * db2/db.h: Update from db-2.3.16. * db2/db_185.h: Likewise. * db2/db_int.h: Likewise. * db2/btree/bt_close.c: Likewise. * db2/btree/bt_cursor.c: Likewise. * db2/btree/bt_delete.c: Likewise. * db2/btree/bt_open.c: Likewise. * db2/btree/bt_page.c: Likewise. * db2/btree/bt_put.c: Likewise. * db2/btree/bt_rec.c: Likewise. * db2/btree/bt_recno.c: Likewise. * db2/btree/btree_auto.c: Likewise. * db2/common/db_appinit.c: Likewise. * db2/common/db_apprec.c: Likewise. * db2/common/db_err.c: Likewise. * db2/common/db_region.c: Likewise. * db2/common/db_shash.c: Likewise. * db2/db/db.c: Likewise. * db2/db/db_auto.c: Likewise. * db2/db/db_conv.c: Likewise. * db2/db/db_dispatch.c: Likewise. * db2/db/db_dup.c: Likewise. * db2/db/db_pr.c: Likewise. * db2/db/db_rec.c: Likewise. * db2/db185/db185.c: Likewise. * db2/dbm/dbm.c: Likewise. * db2/hash/hash.c: Likewise. * db2/hash/hash_auto.c: Likewise. * db2/hash/hash_dup.c: Likewise. * db2/hash/hash_page.c: Likewise. * db2/hash/hash_rec.c: Likewise. * db2/include/btree_ext.h: Likewise. * db2/include/clib_ext.h: Likewise. * db2/include/common_ext.h: Likewise. * db2/include/db.h.src: Likewise. * db2/include/db_185.h.src: Likewise. * db2/include/db_am.h: Likewise. * db2/include/db_ext.h: Likewise. * db2/include/db_int.h.src: Likewise. * db2/include/hash_ext.h: Likewise. * db2/include/lock_ext.h: Likewise. * db2/include/log.h: Likewise. * db2/include/log_auto.h: Likewise. * db2/include/log_ext.h: Likewise. * db2/include/mp.h: Likewise. * db2/include/mp_ext.h: Likewise. * db2/include/mutex_ext.h: Likewise. * db2/include/os_ext.h: Likewise. * db2/include/os_func.h: Likewise. * db2/include/txn_ext.h: Likewise. * db2/lock/lock.c: Likewise. * db2/lock/lock_util.c: Likewise. * db2/log/log.c: Likewise. * db2/log/log.src: Likewise. * db2/log/log_archive.c: Likewise. * db2/log/log_auto.c: Likewise. * db2/log/log_get.c: Likewise. 
* db2/log/log_put.c: Likewise. * db2/log/log_rec.c: Likewise. * db2/log/log_register.c: Likewise. * db2/mp/mp_bh.c: Likewise. * db2/mp/mp_fget.c: Likewise. * db2/mp/mp_fopen.c: Likewise. * db2/mp/mp_fput.c: Likewise. * db2/mp/mp_pr.c: Likewise. * db2/mp/mp_sync.c: Likewise. * db2/mutex/mutex.c: Likewise. * db2/os/os_alloc.c: Likewise. * db2/os/os_config.c: Likewise. * db2/os/os_stat.c: Likewise. * db2/progs/db_checkpoint/db_checkpoint.c: Likewise. * db2/progs/db_deadlock/db_deadlock.c: Likewise. * db2/progs/db_load/db_load.c: Likewise. * db2/progs/db_printlog/db_printlog.c: Likewise. * db2/progs/db_recover/db_recover.c: Likewise. * db2/progs/db_stat/db_stat.c: Likewise. * db2/txn/txn.c: Likewise. * elf/dl-close.c (_dl_close): Rename inner variable named map to imap. Unmap memory blocks in reverse order of allocation. Call munmap with load address added to offset. Bug reported by Miguel de Icaza. * locale/programs/ld-collate.c (collate_end_weight): Correctly fill up weight array. * localedata/locales/cs_CZ: Update by Michael Mraka <michael@informatics.muni.cz>. * misc/sys/syslog.h: Reformat a bit. Don't declare vsyslog unless __USE_BSD. * nis/nss_compat/compat-grp.c: Correctly handle buffer overflow while reading line with fgets. * nis/nss_compat/compat-pwd.c: Likewise. * nis/nss_compat/compat-spwd.c: Likewise. * nss/nss_files/files-XXX.c: Likewise. * nss/nss_files/files-alias.c: Likewise. * shadow/fgetspent_r.c: Likewise. * string/strerror_r.c: Correct comment. Patch by Andreas Jaeger. * sysdeps/unix/sysv/linux/bits/stat.h: Define _STATBUF_ST_RDEV. * sysdeps/unix/sysv/linux/alpha/bits/stat.h: Likewise. * sysdeps/unix/sysv/linux/mips/bits/stat.h: Likewise. * sysdeps/unix/sysv/linux/i386/sys/ucontext.h: Allocate room for FP register content in ucontext_t. 1998-01-22 Andreas Jaeger <aj@arthur.rhein-neckar.de> * sysdeps/libm-ieee754/s_modfl.c (modf): Correct calculation. Patch by Markus Schoder <Markus.Schoder@dresdner-bank.de>. 
* math/libm-test.c (modf_test): Add more tests for modf. * sysdeps/unix/sysv/linux/sys/mman.h: Add const to mlock and munlock according to Unix98. 1998-01-25 Andreas Jaeger <aj@arthur.rhein-neckar.de> * nss/nss_files/files-network.c (LINE_PARSER): Set n_addrtype to AF_INET. 1998-01-21 07:22 H.J. Lu <hjl@gnu.org> * sysdeps/posix/ttyname.c: Handle symbolic links. * sysdeps/posix/ttyname_r.c: Ditto. 1998-01-25 19:39 Ulrich Drepper <drepper@cygnus.com> * db2/makedb.c (process_input): Write terminating \0 for value. (print_database): Simply print value string, don't use length. 1998-01-24 Andreas Jaeger <aj@arthur.rhein-neckar.de> * nss/nsswitch.c (__nss_configure_lookup): Correct test for early exit of loop. 1998-01-25 Thorsten Kukuk <kukuk@vt.uni-paderborn.de> * nis/ypclnt.c: Don't set dom_client to NULL, or we will not close all sockets. 1998-01-25 14:54 Ulrich Drepper <drepper@cygnus.com> * signal/Makefile (routines): Add sighold, sigrelse, sigignore, and sigset. * signal/sighold.c: New file. * signal/sigrelse.c: New file. * sysdeps/generic/sigignore.c: New file. * sysdeps/generic/sigset.c: New file. * sysdeps/posix/sigignore.c: New file. * sysdeps/posix/sigset.c: New file. * signal/signal.h: Add prototypes for new functions. * sysdeps/unix/sysv/linux/alpha/bits/signum.h: Define SIG_HOLD. * sysdeps/unix/sysv/linux/bits/signum.h: Likewise. * sysdeps/unix/sysv/linux/mips/bits/signum.h: Likewise. * sysdeps/unix/sysv/linux/sparc/bits/signum.h: Likewise. 1998-01-23 00:16 Tim Waugh <tim@cyberelk.demon.co.uk> * posix/wordexp.c: Added IFS field-splitting in parameter and command substitution. Fixed an IFS-related bug that caused an infinite loop. 1998-01-25 12:38 Ulrich Drepper <drepper@cygnus.com> * stdlib/Makefile (routines): Add getcontext, setcontext, makecontext, and swapcontext. * stdlib/ucontext.h: Correct prototypes. * sysdeps/generic/getcontext.c: New file. * sysdeps/generic/setcontext.c: New file. * sysdeps/generic/makecontext.c: New file. 
* sysdeps/generic/swapcontext.c: New file. * sysdeps/unix/sysv/linux/sparc/sparc64/Makefile: Removed. 1998-01-25 04:07 Ulrich Drepper <drepper@cygnus.com> * resource/sys/resource.h: Remove #include of ulimit.h which is not allowed by SUSv2 and not available on other systems. * sysdeps/unix/sysv/linux/ulimit.c: Include ulimit.h. * streams/Makefile: New file. * streams/stropts.h: New file. * sysdeps/generic/bits/stropts.h: New file. * sysdeps/generic/isastream.c: New file. * sysdeps/generic/getmsg.c: New file. * sysdeps/generic/getpmsg.c: New file. * sysdeps/generic/putmsg.c: New file. * sysdeps/generic/putpmsg.c: New file. * sysdeps/generic/fattach.c: New file. * sysdeps/generic/fdetach.c: New file. * sysdeps/unix/inet/Subdirs: Add streams. * sysdeps/generic/bits/types.h: Add definition of __t_scalar_t and __t_uscalar_t. * sysdeps/unix/sysv/linux/bits/types.h: Likewise. * sysdeps/unix/sysv/linux/alpha/bits/types.h: Likewise. * sysdeps/unix/sysv/linux/mips/bits/types.h: Likewise. * sysdeps/unix/sysv/linux/sparc/sparc64/bits/types.h: Likewise. * sysdeps/mach/_strerror.c: Follow changes of generic version to handle small buffers correctly. 1998-01-24 17:31 H.J. Lu <hjl@gnu.org> * sysdeps/alpha/dl-machine.h (ELF_MACHINE_RUNTIME_TRAMPOLINE): Remove the 3rd arg and add declaration for _dl_runtime_resolve and _dl_runtime_profile. 1998-01-25 02:42 Ulrich Drepper <drepper@cygnus.com> * include/cpio.h: New file. Mainly copied from GNU cpio. * posix/Makefile (headers): Add cpio.h.
545 lines
15 KiB
C
545 lines
15 KiB
C
/*-
|
|
* See the file LICENSE for redistribution information.
|
|
*
|
|
* Copyright (c) 1996, 1997
|
|
* Sleepycat Software. All rights reserved.
|
|
*/
|
|
/*
|
|
* Copyright (c) 1990, 1993, 1994
|
|
* The Regents of the University of California. All rights reserved.
|
|
*
|
|
* This code is derived from software contributed to Berkeley by
|
|
* Margo Seltzer.
|
|
*
|
|
* Redistribution and use in source and binary forms, with or without
|
|
* modification, are permitted provided that the following conditions
|
|
* are met:
|
|
* 1. Redistributions of source code must retain the above copyright
|
|
* notice, this list of conditions and the following disclaimer.
|
|
* 2. Redistributions in binary form must reproduce the above copyright
|
|
* notice, this list of conditions and the following disclaimer in the
|
|
* documentation and/or other materials provided with the distribution.
|
|
* 3. All advertising materials mentioning features or use of this software
|
|
* must display the following acknowledgement:
|
|
* This product includes software developed by the University of
|
|
* California, Berkeley and its contributors.
|
|
* 4. Neither the name of the University nor the names of its contributors
|
|
* may be used to endorse or promote products derived from this software
|
|
* without specific prior written permission.
|
|
*
|
|
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
|
|
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
|
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
|
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
|
|
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
|
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
|
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
|
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
|
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
|
* SUCH DAMAGE.
|
|
*/
|
|
#include "config.h"
|
|
|
|
#ifndef lint
|
|
static const char sccsid[] = "@(#)hash_dup.c 10.10 (Sleepycat) 1/8/98";
|
|
#endif /* not lint */
|
|
|
|
/*
|
|
* PACKAGE: hashing
|
|
*
|
|
* DESCRIPTION:
|
|
* Manipulation of duplicates for the hash package.
|
|
*
|
|
* ROUTINES:
|
|
*
|
|
* External
|
|
* __add_dup
|
|
* Internal
|
|
*/
|
|
|
|
#ifndef NO_SYSTEM_INCLUDES
|
|
#include <sys/types.h>
|
|
|
|
#include <stdio.h>
|
|
#include <stdlib.h>
|
|
#include <string.h>
|
|
#include <unistd.h>
|
|
#endif
|
|
|
|
#include "db_int.h"
|
|
#include "db_page.h"
|
|
#include "db_swap.h"
|
|
#include "hash.h"
|
|
|
|
static int __ham_check_move __P((HTAB *, HASH_CURSOR *, int32_t));
|
|
static int __ham_dup_convert __P((HTAB *, HASH_CURSOR *));
|
|
static int __ham_make_dup __P((const DBT *, DBT *d, void **, u_int32_t *));
|
|
|
|
/*
 * Called from hash_access to add a duplicate key. nval is the new
 * value that we want to add.  The flags correspond to the flag values
 * to cursor_put indicating where to add the new element.
 * There are 4 cases.
 * Case 1: The existing duplicate set already resides on a separate page.
 *	   We can use common code for this.
 * Case 2: The element is small enough to just be added to the existing set.
 * Case 3: The element is large enough to be a big item, so we're going to
 *	   have to push the set onto a new page.
 * Case 4: The element is large enough to push the duplicate set onto a
 *	   separate page.
 *
 * Returns 0 on success, a non-zero error from a lower-level call otherwise.
 *
 * PUBLIC: int __ham_add_dup __P((HTAB *, HASH_CURSOR *, DBT *, int));
 */
int
__ham_add_dup(hashp, hcp, nval, flags)
	HTAB *hashp;
	HASH_CURSOR *hcp;
	DBT *nval;
	int flags;
{
	DBT pval, tmp_val;
	u_int32_t del_len, new_size;
	int ret;
	u_int8_t *hk;

	/*
	 * DB_CURRENT on an on-page set (dpgno == PGNO_INVALID) replaces the
	 * current duplicate, so the replaced item's bytes are reclaimed;
	 * account for that when sizing the change below.
	 */
	if (flags == DB_CURRENT && hcp->dpgno == PGNO_INVALID)
		del_len = hcp->dup_len;
	else
		del_len = 0;

	/*
	 * Make sure the pair's page can absorb the net size change; this
	 * may move the pair (and the cursor) to another page in the chain.
	 */
	if ((ret = __ham_check_move(hashp, hcp,
	    (int32_t)DUP_SIZE(nval->size) - (int32_t)del_len)) != 0)
		return (ret);

	/*
	 * Check if resulting duplicate set is going to need to go
	 * onto a separate duplicate page.  If so, convert the
	 * duplicate set and add the new one.  After conversion,
	 * hcp->dndx is the first free ndx or the index of the
	 * current pointer into the duplicate set.
	 */
	hk = H_PAIRDATA(hcp->pagep, hcp->bndx);
	new_size = DUP_SIZE(nval->size) - del_len + LEN_HKEYDATA(hcp->pagep,
	    hashp->hdr->pagesize, H_DATAINDEX(hcp->bndx));

	/*
	 * We convert to off-page duplicates if the item is a big item,
	 * the addition of the new item will make the set large, or
	 * if there isn't enough room on this page to add the next item.
	 */
	if (HPAGE_PTYPE(hk) != H_OFFDUP &&
	    (HPAGE_PTYPE(hk) == H_OFFPAGE || ISBIG(hashp, new_size) ||
	    DUP_SIZE(nval->size) - del_len > P_FREESPACE(hcp->pagep))) {

		if ((ret = __ham_dup_convert(hashp, hcp)) != 0)
			return (ret);
		else
			/* Conversion rewrote the pair; refetch the entry. */
			hk = H_PAIRDATA(hcp->pagep, hcp->bndx);
	}

	/* There are two separate cases here: on page and off page. */
	if (HPAGE_PTYPE(hk) != H_OFFDUP) {
		if (HPAGE_PTYPE(hk) != H_DUPLICATE) {
			/*
			 * First addition: rewrite the existing plain item
			 * in duplicate (length-wrapped) format in place.
			 */
			HPAGE_PTYPE(hk) = H_DUPLICATE;
			pval.flags = 0;
			pval.data = HKEYDATA_DATA(hk);
			pval.size = LEN_HDATA(hcp->pagep, hashp->hdr->pagesize,
			    hcp->bndx);
			if ((ret =
			    __ham_make_dup(&pval, &tmp_val, &hcp->big_data,
			    &hcp->big_datalen)) != 0 || (ret =
			    __ham_replpair(hashp, hcp, &tmp_val, 1)) != 0)
				return (ret);
		}

		/* Now make the new entry a duplicate. */
		if ((ret = __ham_make_dup(nval,
		    &tmp_val, &hcp->big_data, &hcp->big_datalen)) != 0)
			return (ret);

		/*
		 * Pick the byte offset (doff) in the on-page set where the
		 * new duplicate goes; dlen is non-zero only for DB_CURRENT,
		 * where it covers the item being replaced.
		 */
		tmp_val.dlen = 0;
		switch (flags) {			/* On page. */
		case DB_KEYFIRST:
			tmp_val.doff = 0;
			break;
		case DB_KEYLAST:
			tmp_val.doff = LEN_HDATA(hcp->pagep,
			    hashp->hdr->pagesize, hcp->bndx);
			break;
		case DB_CURRENT:
			tmp_val.doff = hcp->dup_off;
			tmp_val.dlen = DUP_SIZE(hcp->dup_len);
			break;
		case DB_BEFORE:
			tmp_val.doff = hcp->dup_off;
			break;
		case DB_AFTER:
			tmp_val.doff = hcp->dup_off + DUP_SIZE(hcp->dup_len);
			break;
		}
		/* Add the duplicate. */
		ret = __ham_replpair(hashp, hcp, &tmp_val, 0);
		if (ret == 0)
			ret = __ham_dirty_page(hashp, hcp->pagep);
		/* Adjust any cursors on this page for the insertion. */
		__ham_c_update(hcp, hcp->pgno, tmp_val.size, 1, 1);
		return (ret);
	}

	/* If we get here, then we're on duplicate pages. */
	if (hcp->dpgno == PGNO_INVALID) {
		/* Pull the duplicate-page number out of the OFFDUP entry. */
		memcpy(&hcp->dpgno, HOFFDUP_PGNO(hk), sizeof(db_pgno_t));
		hcp->dndx = 0;
	}

	switch (flags) {
	case DB_KEYFIRST:
		/*
		 * The only way that we are already on a dup page is
		 * if we just converted the on-page representation.
		 * In that case, we've only got one page of duplicates.
		 */
		if (hcp->dpagep == NULL && (ret =
		    __db_dend(hashp->dbp, hcp->dpgno, &hcp->dpagep)) != 0)
			return (ret);
		hcp->dndx = 0;
		break;
	case DB_KEYLAST:
		if (hcp->dpagep == NULL && (ret =
		    __db_dend(hashp->dbp, hcp->dpgno, &hcp->dpagep)) != 0)
			return (ret);
		hcp->dpgno = PGNO(hcp->dpagep);
		hcp->dndx = NUM_ENT(hcp->dpagep);
		break;
	case DB_CURRENT:
		/* Replace: delete the current item, then insert below. */
		if ((ret = __db_ditem(hashp->dbp, hcp->dpagep, hcp->dndx,
		    BKEYDATA_SIZE(GET_BKEYDATA(hcp->dpagep, hcp->dndx)->len)))
		    != 0)
			return (ret);
		break;
	case DB_BEFORE:	/* The default behavior is correct. */
		break;
	case DB_AFTER:
		hcp->dndx++;
		break;
	}

	/* Insert on the duplicate page, allocating overflow pages as needed. */
	ret = __db_dput(hashp->dbp,
	    nval, &hcp->dpagep, &hcp->dndx, __ham_overflow_page);
	hcp->pgno = PGNO(hcp->pagep);
	__ham_c_update(hcp, hcp->pgno, nval->size, 1, 1);
	return (ret);
}
|
|
|
|
/*
 * Convert an on-page set of duplicates to an offpage set of duplicates.
 *
 * Allocates a new P_DUPLICATE page, copies the current pair's data item(s)
 * onto it, and replaces the on-page data with an OFFDUP reference.  On
 * failure the new page is released and hcp->dpagep is reset to NULL.
 */
static int
__ham_dup_convert(hashp, hcp)
	HTAB *hashp;
	HASH_CURSOR *hcp;
{
	BOVERFLOW bo;
	DBT dbt;
	HOFFPAGE ho;
	db_indx_t dndx, len;
	int ret;
	u_int8_t *p, *pend;

	/*
	 * Create a new page for the duplicates.
	 */
	if ((ret =
	    __ham_overflow_page(hashp->dbp, P_DUPLICATE, &hcp->dpagep)) != 0)
		return (ret);
	hcp->dpagep->type = P_DUPLICATE;
	hcp->dpgno = PGNO(hcp->dpagep);

	/*
	 * Now put the duplicates onto the new page.
	 */
	dbt.flags = 0;
	switch (HPAGE_PTYPE(H_PAIRDATA(hcp->pagep, hcp->bndx))) {
	case H_KEYDATA:
		/* Simple case, one key on page; move it to dup page. */
		dndx = 0;
		dbt.size =
		    LEN_HDATA(hcp->pagep, hashp->hdr->pagesize, hcp->bndx);
		dbt.data = HKEYDATA_DATA(H_PAIRDATA(hcp->pagep, hcp->bndx));
		ret = __db_pitem(hashp->dbp, hcp->dpagep,
		    (u_int32_t)dndx, BKEYDATA_SIZE(dbt.size), NULL, &dbt);
		if (ret == 0)
			__ham_dirty_page(hashp, hcp->dpagep);
		break;
	case H_OFFPAGE:
		/* Simple case, one key on page; move it to dup page. */
		/* Rebuild the offpage reference as a BOVERFLOW entry. */
		dndx = 0;
		memcpy(&ho,
		    P_ENTRY(hcp->pagep, H_DATAINDEX(hcp->bndx)), HOFFPAGE_SIZE);
		B_TSET(bo.type, ho.type, 0);
		bo.pgno = ho.pgno;
		bo.tlen = ho.tlen;
		dbt.size = BOVERFLOW_SIZE;
		dbt.data = &bo;

		ret = __db_pitem(hashp->dbp, hcp->dpagep,
		    (u_int32_t)dndx, dbt.size, &dbt, NULL);
		if (ret == 0)
			__ham_dirty_page(hashp, hcp->dpagep);
		break;
	case H_DUPLICATE:
		p = HKEYDATA_DATA(H_PAIRDATA(hcp->pagep, hcp->bndx));
		pend = p +
		    LEN_HDATA(hcp->pagep, hashp->hdr->pagesize, hcp->bndx);

		/*
		 * Walk the on-page set: each element is stored as
		 * <len, data, len>, so advance past both the leading and
		 * trailing length words while inserting each item onto
		 * the duplicate page in order.
		 */
		for (dndx = 0; p < pend; dndx++) {
			memcpy(&len, p, sizeof(db_indx_t));
			dbt.size = len;
			p += sizeof(db_indx_t);
			dbt.data = p;
			p += len + sizeof(db_indx_t);
			ret = __db_dput(hashp->dbp, &dbt,
			    &hcp->dpagep, &dndx, __ham_overflow_page);
			if (ret != 0)
				break;
		}
		break;
	default:
		/* Unknown page-entry type: report page-format corruption. */
		ret = __db_pgfmt(hashp->dbp, (u_long)hcp->pgno);
	}
	if (ret == 0) {
		/*
		 * Now attach this to the source page in place of
		 * the old duplicate item.
		 */
		__ham_move_offpage(hashp, hcp->pagep,
		    (u_int32_t)H_DATAINDEX(hcp->bndx), hcp->dpgno);

		/* Can probably just do a "put" here. */
		ret = __ham_dirty_page(hashp, hcp->pagep);
	} else {
		/* Failure: discard the partially-built duplicate page. */
		(void)__ham_del_page(hashp->dbp, hcp->dpagep);
		hcp->dpagep = NULL;
	}
	return (ret);
}
|
|
|
|
static int
|
|
__ham_make_dup(notdup, duplicate, bufp, sizep)
|
|
const DBT *notdup;
|
|
DBT *duplicate;
|
|
void **bufp;
|
|
u_int32_t *sizep;
|
|
{
|
|
db_indx_t tsize, item_size;
|
|
int ret;
|
|
u_int8_t *p;
|
|
|
|
item_size = (db_indx_t)notdup->size;
|
|
tsize = DUP_SIZE(item_size);
|
|
if ((ret = __ham_init_dbt(duplicate, tsize, bufp, sizep)) != 0)
|
|
return (ret);
|
|
|
|
duplicate->dlen = 0;
|
|
duplicate->flags = notdup->flags;
|
|
F_SET(duplicate, DB_DBT_PARTIAL);
|
|
|
|
p = duplicate->data;
|
|
memcpy(p, &item_size, sizeof(db_indx_t));
|
|
p += sizeof(db_indx_t);
|
|
memcpy(p, notdup->data, notdup->size);
|
|
p += notdup->size;
|
|
memcpy(p, &item_size, sizeof(db_indx_t));
|
|
|
|
duplicate->doff = 0;
|
|
duplicate->dlen = notdup->size;
|
|
|
|
return (0);
|
|
}
|
|
|
|
/*
 * __ham_check_move --
 *	Check whether adding add_len bytes to the current pair's data will
 *	still fit on its page.  If not, move the pair to a page later in
 *	the bucket chain (allocating an overflow page if needed), log the
 *	move, delete the pair from its old page, and reposition the cursor
 *	onto the moved pair.
 */
static int
__ham_check_move(hashp, hcp, add_len)
	HTAB *hashp;
	HASH_CURSOR *hcp;
	int32_t add_len;
{
	DBT k, d;
	DB_LSN new_lsn;
	PAGE *next_pagep;
	db_pgno_t next_pgno;
	int rectype, ret;
	u_int32_t new_datalen, old_len;
	u_int8_t *hk;

	/*
	 * Check if we can do whatever we need to on this page.  If not,
	 * then we'll have to move the current element to a new page.
	 */
	hk = H_PAIRDATA(hcp->pagep, hcp->bndx);

	/*
	 * If the item is already off page duplicates or an offpage item,
	 * then we know we can do whatever we need to do in-place
	 */
	if (HPAGE_PTYPE(hk) == H_OFFDUP || HPAGE_PTYPE(hk) == H_OFFPAGE)
		return (0);

	old_len =
	    LEN_HITEM(hcp->pagep, hashp->hdr->pagesize, H_DATAINDEX(hcp->bndx));
	new_datalen = old_len - HKEYDATA_SIZE(0) + add_len;

	/*
	 * We need to add a new page under two conditions:
	 * 1. The addition makes the total data length cross the BIG
	 *    threshold and the OFFDUP structure won't fit on this page.
	 * 2. The addition does not make the total data cross the
	 *    threshold, but the new data won't fit on the page.
	 * If neither of these is true, then we can return.
	 */
	if (ISBIG(hashp, new_datalen) && (old_len > HOFFDUP_SIZE ||
	    HOFFDUP_SIZE - old_len <= P_FREESPACE(hcp->pagep)))
		return (0);

	if (!ISBIG(hashp, new_datalen) &&
	    add_len <= (int32_t)P_FREESPACE(hcp->pagep))
		return (0);

	/*
	 * If we get here, then we need to move the item to a new page.
	 * Check if there are more pages in the chain.
	 */

	/*
	 * After the move, the pair will occupy either an OFFDUP entry
	 * (if it crossed the BIG threshold) or a regular key/data entry.
	 */
	new_datalen = ISBIG(hashp, new_datalen) ?
	    HOFFDUP_SIZE : HKEYDATA_SIZE(new_datalen);

	/* Walk the chain looking for a page with enough free space. */
	next_pagep = NULL;
	for (next_pgno = NEXT_PGNO(hcp->pagep); next_pgno != PGNO_INVALID;
	    next_pgno = NEXT_PGNO(next_pagep)) {
		/* Release the previously examined page before moving on. */
		if (next_pagep != NULL &&
		    (ret = __ham_put_page(hashp->dbp, next_pagep, 0)) != 0)
			return (ret);

		if ((ret = __ham_get_page(hashp->dbp, next_pgno, &next_pagep)) != 0)
			return (ret);

		if (P_FREESPACE(next_pagep) >= new_datalen)
			break;
	}

	/* No more pages, add one. */
	if (next_pagep == NULL &&
	    (ret = __ham_add_ovflpage(hashp, hcp->pagep, 0, &next_pagep)) != 0)
		return (ret);

	/* Add new page at the end of the chain. */
	if (P_FREESPACE(next_pagep) < new_datalen &&
	    (ret = __ham_add_ovflpage(hashp, next_pagep, 1, &next_pagep)) != 0)
		return (ret);

	/* Copy the item to the new page. */
	if (DB_LOGGING(hashp->dbp)) {
		/*
		 * Build the key and data DBTs for the log record; offpage
		 * entries are logged as their HOFFPAGE structures.
		 */
		rectype = PUTPAIR;
		k.flags = 0;
		d.flags = 0;
		if (HPAGE_PTYPE(
		    H_PAIRKEY(hcp->pagep, hcp->bndx)) == H_OFFPAGE) {
			rectype |= PAIR_KEYMASK;
			k.data = H_PAIRKEY(hcp->pagep, hcp->bndx);
			k.size = HOFFPAGE_SIZE;
		} else {
			k.data =
			    HKEYDATA_DATA(H_PAIRKEY(hcp->pagep, hcp->bndx));
			k.size = LEN_HKEY(hcp->pagep,
			    hashp->hdr->pagesize, hcp->bndx);
		}

		if (HPAGE_PTYPE(hk) == H_OFFPAGE) {
			rectype |= PAIR_DATAMASK;
			d.data = H_PAIRDATA(hcp->pagep, hcp->bndx);
			d.size = HOFFPAGE_SIZE;
		} else {
			d.data =
			    HKEYDATA_DATA(H_PAIRDATA(hcp->pagep, hcp->bndx));
			d.size = LEN_HDATA(hcp->pagep,
			    hashp->hdr->pagesize, hcp->bndx);
		}

		if ((ret = __ham_insdel_log(hashp->dbp->dbenv->lg_info,
		    (DB_TXN *)hashp->dbp->txn, &new_lsn, 0, rectype,
		    hashp->dbp->log_fileid, PGNO(next_pagep),
		    (u_int32_t)H_NUMPAIRS(next_pagep), &LSN(next_pagep),
		    &k, &d)) != 0)
			return (ret);

		/* Move lsn onto page. */
		LSN(next_pagep) = new_lsn;	/* Structure assignment. */
	}

	/* Physically copy the key and data onto the destination page. */
	__ham_copy_item(hashp, hcp->pagep, H_KEYINDEX(hcp->bndx), next_pagep);
	__ham_copy_item(hashp, hcp->pagep, H_DATAINDEX(hcp->bndx), next_pagep);

	/* Now delete the pair from the current page. */
	ret = __ham_del_pair(hashp, hcp, 0);

	/* Release the old page and point the cursor at the moved pair. */
	(void)__ham_put_page(hashp->dbp, hcp->pagep, 1);
	hcp->pagep = next_pagep;
	hcp->pgno = PGNO(hcp->pagep);
	hcp->bndx = H_NUMPAIRS(hcp->pagep) - 1;
	F_SET(hcp, H_EXPAND);
	return (ret);
}
|
|
|
|
/*
 * Replace an onpage set of duplicates with the OFFDUP structure that
 * references the duplicate page.
 * XXX This is really just a special case of __onpage_replace; we should
 * probably combine them.
 *
 * The entry at ndx shrinks to HOFFDUP_SIZE; the freed bytes are reclaimed
 * by sliding the page's data region and adjusting the index table.
 *
 * PUBLIC: void __ham_move_offpage __P((HTAB *, PAGE *, u_int32_t, db_pgno_t));
 */
void
__ham_move_offpage(hashp, pagep, ndx, pgno)
	HTAB *hashp;
	PAGE *pagep;
	u_int32_t ndx;
	db_pgno_t pgno;
{
	DBT new_dbt;
	DBT old_dbt;
	HOFFDUP od;
	db_indx_t i;
	int32_t shrink;
	u_int8_t *src;

	/* The OFFDUP entry that will replace the old item. */
	od.type = H_OFFDUP;
	od.pgno = pgno;

	if (DB_LOGGING(hashp->dbp)) {
		/* Log the replacement of the old item by the OFFDUP entry. */
		new_dbt.data = &od;
		new_dbt.size = HOFFDUP_SIZE;
		old_dbt.data = P_ENTRY(pagep, ndx);
		old_dbt.size = LEN_HITEM(pagep, hashp->hdr->pagesize, ndx);
		(void)__ham_replace_log(hashp->dbp->dbenv->lg_info,
		    (DB_TXN *)hashp->dbp->txn, &LSN(pagep), 0,
		    hashp->dbp->log_fileid, PGNO(pagep), (u_int32_t)ndx,
		    &LSN(pagep), -1, &old_dbt, &new_dbt, 0);
	}

	/* Number of bytes reclaimed by shrinking the item to HOFFDUP_SIZE. */
	shrink =
	    LEN_HITEM(pagep, hashp->hdr->pagesize, ndx) - HOFFDUP_SIZE;

	if (shrink != 0) {
		/* Copy data. */
		/* Slide everything below the entry up by "shrink" bytes. */
		src = (u_int8_t *)(pagep) + HOFFSET(pagep);
		memmove(src + shrink, src, pagep->inp[ndx] - HOFFSET(pagep));
		HOFFSET(pagep) += shrink;

		/* Update index table. */
		for (i = ndx; i < NUM_ENT(pagep); i++)
			pagep->inp[i] += shrink;
	}

	/* Now copy the offdup entry onto the page. */
	memcpy(P_ENTRY(pagep, ndx), &od, HOFFDUP_SIZE);
}
|