#define DEFAULT_MAPSIZE 1048576
/* Lock descriptor stuff */
-#define RXBODY \
- ULONG mr_txnid; \
- pid_t mr_pid; \
- pthread_t mr_tid
-typedef struct MDB_rxbody {
- RXBODY;
-} MDB_rxbody;
-
#ifndef CACHELINE
-# ifdef __APPLE__
-# define CACHELINE 128 /* 64 is too small to contain a mutex */
-# else
-# define CACHELINE 64 /* most CPUs. Itanium uses 128 */
-# endif
+#define CACHELINE 64 /* most CPUs. Itanium uses 128 */
#endif
+/* Body of one reader-table slot: the transaction a reader has open,
+ * plus the process and thread that own the slot. A slot with
+ * mrb_pid == 0 is free (see the allocation loop in txn_begin).
+ */
+typedef struct MDB_rxbody {
+ ULONG mrb_txnid; /* txn ID this reader is pinned to */
+ pid_t mrb_pid; /* owning process; 0 = slot free */
+ pthread_t mrb_tid; /* owning thread */
+} MDB_rxbody;
+
+/* Padded reader slot: the union rounds the body up to a multiple of
+ * CACHELINE so adjacent slots in the reader table do not share a
+ * cache line (avoids false sharing between reader processes).
+ */
typedef struct MDB_reader {
- RXBODY;
- /* cache line alignment */
- char pad[CACHELINE-sizeof(MDB_rxbody)];
+ union {
+ MDB_rxbody mrx;
+/* aliases so existing code can keep writing r->mr_txnid etc. */
+#define mr_txnid mru.mrx.mrb_txnid
+#define mr_pid mru.mrx.mrb_pid
+#define mr_tid mru.mrx.mrb_tid
+ /* cache line alignment: sizeof(MDB_rxbody) rounded up to CACHELINE */
+ char pad[(sizeof(MDB_rxbody)+CACHELINE-1) & ~(CACHELINE-1)];
+ } mru;
} MDB_reader;
-#define TXBODY \
- uint32_t mt_magic; \
- uint32_t mt_version; \
- pthread_mutex_t mt_mutex; \
- ULONG mt_txnid; \
- uint32_t mt_numreaders
+/* Header body of the shared lock region (validated on env open). */
typedef struct MDB_txbody {
- TXBODY;
+ uint32_t mtb_magic; /* checked against MDB_MAGIC on open */
+ uint32_t mtb_version; /* checked against MDB_VERSION on open */
+ pthread_mutex_t mtb_mutex; /* guards the reader-slot table */
+ ULONG mtb_txnid; /* current write-txn ID (bumped at txn begin) */
+ uint32_t mtb_numreaders; /* high-water count of reader slots in use */
+ uint32_t mtb_me_toggle; /* which meta page (0/1) is current */
 } MDB_txbody;
+/* Layout of the shared lock region: header union, write-mutex union,
+ * then the reader table. Each union is padded to a CACHELINE multiple
+ * so the three sections start on distinct cache lines.
+ */
typedef struct MDB_txninfo {
- TXBODY;
- char pad[CACHELINE-sizeof(MDB_txbody)];
- pthread_mutex_t mt_wmutex;
- char pad2[CACHELINE-sizeof(pthread_mutex_t)];
- MDB_reader mt_readers[1];
+ union {
+ MDB_txbody mtb;
+/* aliases for the header fields through the first union */
+#define mti_magic mt1.mtb.mtb_magic
+#define mti_version mt1.mtb.mtb_version
+#define mti_mutex mt1.mtb.mtb_mutex
+#define mti_txnid mt1.mtb.mtb_txnid
+#define mti_numreaders mt1.mtb.mtb_numreaders
+#define mti_me_toggle mt1.mtb.mtb_me_toggle
+ char pad[(sizeof(MDB_txbody)+CACHELINE-1) & ~(CACHELINE-1)];
+ } mt1;
+ union {
+ pthread_mutex_t mt2_wmutex;
+/* serializes write transactions (locked in txn_begin, released on
+ * commit/abort) */
+#define mti_wmutex mt2.mt2_wmutex
+ char pad[(sizeof(pthread_mutex_t)+CACHELINE-1) & ~(CACHELINE-1)];
+ } mt2;
+ MDB_reader mti_readers[1]; /* table actually extends past [1] */
} MDB_txninfo;
/* Common header for all page types. Overflow pages
#define mp_pgno mp_p.p_pgno
union padded {
pgno_t p_pgno; /* page number */
- void * p_pad;
+ void * p_align; /* for IL32P64 */
} mp_p;
#define P_BRANCH 0x01 /* branch page */
#define P_LEAF 0x02 /* leaf page */
#define P_OVERFLOW 0x04 /* overflow page */
#define P_META 0x08 /* meta page */
#define P_DIRTY 0x10 /* dirty page */
+#define P_LEAF2 0x20 /* DB with small, fixed size keys and no data */
uint32_t mp_flags;
#define mp_lower mp_pb.pb.pb_lower
#define mp_upper mp_pb.pb.pb_upper
indx_t pb_upper; /* upper bound of free space */
} pb;
uint32_t pb_pages; /* number of overflow pages */
+ struct {
+ indx_t pb_ksize; /* on a LEAF2 page */
+ indx_t pb_numkeys;
+ } pb2;
} mp_pb;
indx_t mp_ptrs[1]; /* dynamic size */
} MDB_page;
} MDB_meta;
typedef struct MDB_dhead { /* a dirty page */
- STAILQ_ENTRY(MDB_dpage) md_next; /* queue of dirty pages */
MDB_page *md_parent;
unsigned md_pi; /* parent index */
int md_num;
MDB_page p;
} MDB_dpage;
-STAILQ_HEAD(dirty_queue, MDB_dpage); /* FIXME: use a sorted data structure */
-
typedef struct MDB_oldpages {
struct MDB_oldpages *mo_next;
ULONG mo_txnid;
struct MDB_xcursor *mc_xcursor;
};
-#define METAHASHLEN offsetof(MDB_meta, mm_hash)
#define METADATA(p) ((void *)((char *)p + PAGEHDRSZ))
typedef struct MDB_node {
MDB_env *mt_env;
pgno_t *mt_free_pgs; /* this is an IDL */
union {
- struct dirty_queue *dirty_queue; /* modified pages */
+ MIDL2 *dirty_list; /* modified pages */
MDB_reader *reader;
} mt_u;
MDB_dbx *mt_dbxs; /* array */
MDB_db *mt_dbs;
unsigned int mt_numdbs;
-#define MDB_TXN_RDONLY 0x01 /* read-only transaction */
-#define MDB_TXN_ERROR 0x02 /* an error has occurred */
+#define MDB_TXN_RDONLY 0x01 /* read-only transaction */
+#define MDB_TXN_ERROR 0x02 /* an error has occurred */
#define MDB_TXN_METOGGLE 0x04 /* used meta page 1 */
unsigned int mt_flags;
};
struct MDB_env {
int me_fd;
int me_lfd;
- uint32_t me_flags;
+ int me_mfd; /* just for writing the meta pages */
+#define MDB_FATAL_ERROR 0x80000000U
+ uint32_t me_flags;
+ uint32_t me_extrapad; /* unused for now */
unsigned int me_maxreaders;
unsigned int me_numdbs;
unsigned int me_maxdbs;
MDB_txn *me_txn; /* current write transaction */
size_t me_mapsize;
off_t me_size; /* current file size */
+ pgno_t me_maxpg; /* me_mapsize / me_psize */
unsigned int me_psize;
- int me_db_toggle;
+ unsigned int me_db_toggle;
MDB_dbx *me_dbxs; /* array */
MDB_db *me_dbs[2];
MDB_oldpages *me_pghead;
pthread_key_t me_txkey; /* thread-key for readers */
pgno_t me_free_pgs[MDB_IDL_UM_SIZE];
+ MIDL2 me_dirty_list[MDB_IDL_DB_SIZE];
};
#define NODESIZE offsetof(MDB_node, mn_data)
#define NODEDSZ(node) ((node)->mn_dsize)
#define MDB_COMMIT_PAGES 64 /* max number of pages to write in one commit */
-#define MDB_MAXCACHE_DEF 1024 /* max number of pages to keep in cache */
static int mdb_search_page_root(MDB_txn *txn,
MDB_dbi dbi, MDB_val *key,
return *p1 - *p2;
}
+/* Report the library version.
+ * maj/min/pat receive the numeric version components; the descriptive
+ * version string is returned.
+ * NOTE(review): assumes all three out-pointers are non-NULL — confirm
+ * that is the documented contract.
+ */
+char *
+mdb_version(int *maj, int *min, int *pat)
+{
+ *maj = MDB_VERSION_MAJOR;
+ *min = MDB_VERSION_MINOR;
+ *pat = MDB_VERSION_PATCH;
+ return MDB_VERSION_STRING;
+}
+
+/* Message table for MDB-specific return codes. Entry order must track
+ * the numeric codes starting at MDB_KEYEXIST, since mdb_strerror()
+ * indexes this array by (err - MDB_KEYEXIST).
+ */
+static const char *errstr[] = {
+	"MDB_KEYEXIST: Key/data pair already exists",
+	"MDB_NOTFOUND: No matching key/data pair found",
+	"MDB_PAGE_NOTFOUND: Requested page not found",
+	"MDB_CORRUPTED: Located page was wrong type",
+	"MDB_PANIC: Update of meta page failed",
+	"MDB_VERSION_MISMATCH: Database environment version mismatch"
+};
+
+/* Map an error code to a human-readable message.
+ * MDB-specific codes in [MDB_KEYEXIST, MDB_VERSION_MISMATCH] use the
+ * errstr table; 0 reports success; everything else falls through to
+ * the system strerror().
+ * NOTE(review): the range test assumes the MDB error codes are
+ * contiguous and ordered — confirm against the header definitions.
+ */
+char *
+mdb_strerror(int err)
+{
+	if (!err)
+		return ("Successful return: 0");
+
+	if (err >= MDB_KEYEXIST && err <= MDB_VERSION_MISMATCH)
+		return (char *)errstr[err - MDB_KEYEXIST];
+
+	return strerror(err);
+}
+
int
mdb_cmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b)
{
MDB_dpage *dp;
pgno_t pgno = P_INVALID;
ULONG oldest;
+ MIDL2 mid;
if (txn->mt_txnid > 2) {
}
if (txn->mt_env->me_pghead) {
unsigned int i;
- for (i=0; i<txn->mt_env->me_txns->mt_numreaders; i++) {
- ULONG mr = txn->mt_env->me_txns->mt_readers[i].mr_txnid;
+ for (i=0; i<txn->mt_env->me_txns->mti_numreaders; i++) {
+ ULONG mr = txn->mt_env->me_txns->mti_readers[i].mr_txnid;
if (!mr) continue;
if (mr < oldest)
- oldest = txn->mt_env->me_txns->mt_readers[i].mr_txnid;
+ oldest = txn->mt_env->me_txns->mti_readers[i].mr_txnid;
}
if (oldest > txn->mt_env->me_pghead->mo_txnid) {
MDB_oldpages *mop = txn->mt_env->me_pghead;
}
}
+ if (pgno == P_INVALID) {
+ /* DB size is maxed out */
+ if (txn->mt_next_pgno + num >= txn->mt_env->me_maxpg)
+ return NULL;
+ }
if ((dp = malloc(txn->mt_env->me_psize * num + sizeof(MDB_dhead))) == NULL)
return NULL;
dp->h.md_num = num;
dp->h.md_parent = parent;
dp->h.md_pi = parent_idx;
- STAILQ_INSERT_TAIL(txn->mt_u.dirty_queue, dp, h.md_next);
if (pgno == P_INVALID) {
dp->p.mp_pgno = txn->mt_next_pgno;
txn->mt_next_pgno += num;
} else {
dp->p.mp_pgno = pgno;
}
+ mid.mid = dp->p.mp_pgno;
+ mid.mptr = dp;
+ mdb_midl2_insert(txn->mt_u.dirty_list, &mid);
return dp;
}
}
int
-mdb_env_sync(MDB_env *env)
+mdb_env_sync(MDB_env *env, int force)
{
int rc = 0;
- if (!F_ISSET(env->me_flags, MDB_NOSYNC)) {
- if (fsync(env->me_fd))
+ if (force || !F_ISSET(env->me_flags, MDB_NOSYNC)) {
+ if (fdatasync(env->me_fd))
rc = errno;
}
return rc;
MDB_txn *txn;
int rc, toggle;
+ if (env->me_flags & MDB_FATAL_ERROR) {
+ DPRINTF("mdb_txn_begin: environment had fatal error, must shutdown!");
+ return MDB_PANIC;
+ }
if ((txn = calloc(1, sizeof(MDB_txn))) == NULL) {
DPRINTF("calloc: %s", strerror(errno));
return ENOMEM;
if (rdonly) {
txn->mt_flags |= MDB_TXN_RDONLY;
} else {
- txn->mt_u.dirty_queue = calloc(1, sizeof(*txn->mt_u.dirty_queue));
- if (txn->mt_u.dirty_queue == NULL) {
- free(txn);
- return ENOMEM;
- }
- STAILQ_INIT(txn->mt_u.dirty_queue);
-
- pthread_mutex_lock(&env->me_txns->mt_wmutex);
- env->me_txns->mt_txnid++;
+ txn->mt_u.dirty_list = env->me_dirty_list;
+ txn->mt_u.dirty_list[0].mid = 0;
txn->mt_free_pgs = env->me_free_pgs;
txn->mt_free_pgs[0] = 0;
+
+ pthread_mutex_lock(&env->me_txns->mti_wmutex);
+ env->me_txns->mti_txnid++;
}
- txn->mt_txnid = env->me_txns->mt_txnid;
+ txn->mt_txnid = env->me_txns->mti_txnid;
if (rdonly) {
MDB_reader *r = pthread_getspecific(env->me_txkey);
if (!r) {
unsigned int i;
- pthread_mutex_lock(&env->me_txns->mt_mutex);
- for (i=0; i<env->me_txns->mt_numreaders; i++)
- if (env->me_txns->mt_readers[i].mr_pid == 0)
+ pthread_mutex_lock(&env->me_txns->mti_mutex);
+ for (i=0; i<env->me_txns->mti_numreaders; i++)
+ if (env->me_txns->mti_readers[i].mr_pid == 0)
break;
if (i == env->me_maxreaders) {
pthread_mutex_unlock(&env->me_txns->mti_mutex);
return ENOSPC;
}
- env->me_txns->mt_readers[i].mr_pid = getpid();
- env->me_txns->mt_readers[i].mr_tid = pthread_self();
- r = &env->me_txns->mt_readers[i];
+ env->me_txns->mti_readers[i].mr_pid = getpid();
+ env->me_txns->mti_readers[i].mr_tid = pthread_self();
+ r = &env->me_txns->mti_readers[i];
pthread_setspecific(env->me_txkey, r);
- if (i >= env->me_txns->mt_numreaders)
- env->me_txns->mt_numreaders = i+1;
- pthread_mutex_unlock(&env->me_txns->mt_mutex);
+ if (i >= env->me_txns->mti_numreaders)
+ env->me_txns->mti_numreaders = i+1;
+ pthread_mutex_unlock(&env->me_txns->mti_mutex);
}
r->mr_txnid = txn->mt_txnid;
txn->mt_u.reader = r;
txn->mt_env = env;
+ toggle = env->me_txns->mti_me_toggle;
if ((rc = mdb_env_read_meta(env, &toggle)) != MDB_SUCCESS) {
mdb_txn_abort(txn);
return rc;
void
mdb_txn_abort(MDB_txn *txn)
{
- MDB_dpage *dp;
MDB_env *env;
if (txn == NULL)
unsigned int i;
/* Discard all dirty pages. */
- while (!STAILQ_EMPTY(txn->mt_u.dirty_queue)) {
- dp = STAILQ_FIRST(txn->mt_u.dirty_queue);
- STAILQ_REMOVE_HEAD(txn->mt_u.dirty_queue, h.md_next);
- free(dp);
- }
- free(txn->mt_u.dirty_queue);
+ for (i=1; i<=txn->mt_u.dirty_list[0].mid; i++)
+ free(txn->mt_u.dirty_list[i].mptr);
while ((mop = txn->mt_env->me_pghead)) {
txn->mt_env->me_pghead = mop->mo_next;
}
env->me_txn = NULL;
- env->me_txns->mt_txnid--;
+ env->me_txns->mti_txnid--;
for (i=2; i<env->me_numdbs; i++)
env->me_dbxs[i].md_dirty = 0;
- pthread_mutex_unlock(&env->me_txns->mt_wmutex);
+ pthread_mutex_unlock(&env->me_txns->mti_wmutex);
}
free(txn);
env = txn->mt_env;
if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) {
- DPRINTF("attempt to commit read-only transaction");
mdb_txn_abort(txn);
- return EPERM;
+ return MDB_SUCCESS;
}
if (txn != env->me_txn) {
return EINVAL;
}
- if (STAILQ_EMPTY(txn->mt_u.dirty_queue))
+ if (!txn->mt_u.dirty_list[0].mid)
goto done;
DPRINTF("committing transaction %lu on mdbenv %p, root page %lu",
n = 0;
done = 1;
size = 0;
- STAILQ_FOREACH(dp, txn->mt_u.dirty_queue, h.md_next) {
+ for (i=1; i<=txn->mt_u.dirty_list[0].mid; i++) {
+ dp = txn->mt_u.dirty_list[i].mptr;
if (dp->p.mp_pgno != next) {
if (n) {
DPRINTF("committing %u dirty pages", n);
/* Drop the dirty pages.
*/
- while (!STAILQ_EMPTY(txn->mt_u.dirty_queue)) {
- dp = STAILQ_FIRST(txn->mt_u.dirty_queue);
- STAILQ_REMOVE_HEAD(txn->mt_u.dirty_queue, h.md_next);
- free(dp);
- }
+ for (i=1; i<=txn->mt_u.dirty_list[0].mid; i++)
+ free(txn->mt_u.dirty_list[i].mptr);
- if ((n = mdb_env_sync(env)) != 0 ||
- (n = mdb_env_write_meta(txn)) != MDB_SUCCESS ||
- (n = mdb_env_sync(env)) != 0) {
+ if ((n = mdb_env_sync(env, 0)) != 0 ||
+ (n = mdb_env_write_meta(txn)) != MDB_SUCCESS) {
mdb_txn_abort(txn);
return n;
}
- env->me_txn = NULL;
+done:
+ env->me_txn = NULL;
/* update the DB tables */
{
int toggle = !env->me_db_toggle;
free(txn->mt_dbs);
}
- pthread_mutex_unlock(&env->me_txns->mt_wmutex);
- free(txn->mt_u.dirty_queue);
+ pthread_mutex_unlock(&env->me_txns->mti_wmutex);
free(txn);
- txn = NULL;
-
-done:
- mdb_txn_abort(txn);
return MDB_SUCCESS;
}
mdb_env_write_meta(MDB_txn *txn)
{
MDB_env *env;
- MDB_meta meta;
+ MDB_meta meta, metab;
off_t off;
- int rc, len;
+ int rc, len, toggle;
char *ptr;
assert(txn != NULL);
assert(txn->mt_env != NULL);
+ toggle = !F_ISSET(txn->mt_flags, MDB_TXN_METOGGLE);
DPRINTF("writing meta page %d for root page %lu",
- !F_ISSET(txn->mt_flags, MDB_TXN_METOGGLE), txn->mt_dbs[MAIN_DBI].md_root);
+ toggle, txn->mt_dbs[MAIN_DBI].md_root);
env = txn->mt_env;
+ metab.mm_txnid = env->me_metas[toggle]->mm_txnid;
+ metab.mm_last_pg = env->me_metas[toggle]->mm_last_pg;
+
ptr = (char *)&meta;
off = offsetof(MDB_meta, mm_dbs[0].md_depth);
len = sizeof(MDB_meta) - off;
meta.mm_last_pg = txn->mt_next_pgno - 1;
meta.mm_txnid = txn->mt_txnid;
- if (!F_ISSET(txn->mt_flags, MDB_TXN_METOGGLE))
+ if (toggle)
off += env->me_psize;
off += PAGEHDRSZ;
- lseek(env->me_fd, off, SEEK_SET);
- rc = write(env->me_fd, ptr, len);
+ /* Write to the SYNC fd */
+ rc = pwrite(env->me_mfd, ptr, len, off);
if (rc != len) {
DPRINTF("write failed, disk error?");
+ /* On a failure, the pagecache still contains the new data.
+ * Write some old data back, to prevent it from being used.
+ * Use the non-SYNC fd; we know it will fail anyway.
+ */
+ meta.mm_last_pg = metab.mm_last_pg;
+ meta.mm_txnid = metab.mm_txnid;
+ rc = pwrite(env->me_fd, ptr, len, off);
+ env->me_flags |= MDB_FATAL_ERROR;
return errno;
}
+ txn->mt_env->me_txns->mti_me_toggle = toggle;
return MDB_SUCCESS;
}
assert(env != NULL);
- if (env->me_metas[0]->mm_txnid < env->me_metas[1]->mm_txnid)
+ if (which)
+ toggle = *which;
+ else if (env->me_metas[0]->mm_txnid < env->me_metas[1]->mm_txnid)
toggle = 1;
if (env->me_meta != env->me_metas[toggle])
env->me_meta = env->me_metas[toggle];
- if (which)
- *which = toggle;
DPRINTF("Using meta page %d", toggle);
e->me_maxdbs = 2;
e->me_fd = -1;
e->me_lfd = -1;
+ e->me_mfd = -1;
*env = e;
return MDB_SUCCESS;
}
}
env->me_psize = meta.mm_psize;
- p = (MDB_page *)(MDB_page *)(MDB_page *)(MDB_page *)(MDB_page *)(MDB_page *)(MDB_page *)(MDB_page *)(MDB_page *)env->me_map;
+ env->me_maxpg = env->me_mapsize / env->me_psize;
+
+ p = (MDB_page *)env->me_map;
env->me_metas[0] = METADATA(p);
env->me_metas[1] = (MDB_meta *)((char *)env->me_metas[0] + meta.mm_psize);
{
struct flock lock_info;
- env->me_txns->mt_txnid = env->me_meta->mm_txnid;
+ env->me_txns->mti_txnid = env->me_meta->mm_txnid;
+ if (env->me_metas[0]->mm_txnid < env->me_metas[1]->mm_txnid)
+ env->me_txns->mti_me_toggle = 1;
memset((void *)&lock_info, 0, sizeof(lock_info));
lock_info.l_type = F_RDLCK;
pthread_mutexattr_t mattr;
pthread_mutexattr_init(&mattr);
- pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED);
- pthread_mutex_init(&env->me_txns->mt_mutex, &mattr);
- pthread_mutex_init(&env->me_txns->mt_wmutex, &mattr);
- env->me_txns->mt_version = MDB_VERSION;
- env->me_txns->mt_magic = MDB_MAGIC;
- env->me_txns->mt_txnid = 0;
- env->me_txns->mt_numreaders = 0;
+ rc = pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED);
+ if (rc) {
+ goto fail;
+ }
+ pthread_mutex_init(&env->me_txns->mti_mutex, &mattr);
+ pthread_mutex_init(&env->me_txns->mti_wmutex, &mattr);
+ env->me_txns->mti_version = MDB_VERSION;
+ env->me_txns->mti_magic = MDB_MAGIC;
+ env->me_txns->mti_txnid = 0;
+ env->me_txns->mti_numreaders = 0;
+ env->me_txns->mti_me_toggle = 0;
} else {
- if (env->me_txns->mt_magic != MDB_MAGIC) {
+ if (env->me_txns->mti_magic != MDB_MAGIC) {
DPRINTF("lock region has invalid magic");
rc = EINVAL;
goto fail;
}
- if (env->me_txns->mt_version != MDB_VERSION) {
+ if (env->me_txns->mti_version != MDB_VERSION) {
DPRINTF("lock region is version %u, expected version %u",
- env->me_txns->mt_version, MDB_VERSION);
+ env->me_txns->mti_version, MDB_VERSION);
rc = MDB_VERSION_MISMATCH;
goto fail;
}
fail:
close(env->me_lfd);
+ env->me_lfd = -1;
return rc;
}
else
oflags = O_RDWR | O_CREAT;
- if ((env->me_fd = open(dpath, oflags, mode)) == -1)
- return errno;
+ if ((env->me_fd = open(dpath, oflags, mode)) == -1) {
+ rc = errno;
+ goto leave;
+ }
+
+ if ((rc = mdb_env_open2(env, flags)) == MDB_SUCCESS) {
+ /* synchronous fd for meta writes */
+ if (!(flags & (MDB_RDONLY|MDB_NOSYNC)))
+ oflags |= O_DSYNC;
+ if ((env->me_mfd = open(dpath, oflags, mode)) == -1) {
+ rc = errno;
+ goto leave;
+ }
- if ((rc = mdb_env_open2(env, flags)) != MDB_SUCCESS) {
- close(env->me_fd);
- env->me_fd = -1;
- } else {
env->me_path = strdup(path);
DPRINTF("opened dbenv %p", (void *) env);
pthread_key_create(&env->me_txkey, mdb_env_reader_dest);
}
leave:
+ if (rc) {
+ if (env->me_fd >= 0) {
+ close(env->me_fd);
+ env->me_fd = -1;
+ }
+ if (env->me_lfd >= 0) {
+ close(env->me_lfd);
+ env->me_lfd = -1;
+ }
+ }
free(lpath);
return rc;
}
if (env->me_map) {
munmap(env->me_map, env->me_mapsize);
}
+ close(env->me_mfd);
close(env->me_fd);
if (env->me_txns) {
pid_t pid = getpid();
MDB_page *p = NULL;
int found = 0;
- if (!F_ISSET(txn->mt_flags, MDB_TXN_RDONLY) && !STAILQ_EMPTY(txn->mt_u.dirty_queue)) {
+ if (!F_ISSET(txn->mt_flags, MDB_TXN_RDONLY) && txn->mt_u.dirty_list[0].mid) {
MDB_dpage *dp;
- STAILQ_FOREACH(dp, txn->mt_u.dirty_queue, h.md_next) {
- if (dp->p.mp_pgno == pgno) {
- p = &dp->p;
- found = 1;
- break;
- }
+ MIDL2 id;
+ unsigned x;
+ id.mid = pgno;
+ x = mdb_midl2_search(txn->mt_u.dirty_list, &id);
+ if (x <= txn->mt_u.dirty_list[0].mid && txn->mt_u.dirty_list[x].mid == pgno) {
+ dp = txn->mt_u.dirty_list[x].mptr;
+ p = &dp->p;
+ found = 1;
}
}
if (!found) {
int rc;
if (cursor && cursor_push_page(cursor, mp) == NULL)
- return MDB_FAIL;
+ return ENOMEM;
while (IS_BRANCH(mp)) {
unsigned int i = 0;
mpp->mp_parent = mp;
if ((mp = mdb_get_page(txn, NODEPGNO(node))) == NULL)
- return MDB_FAIL;
+ return MDB_PAGE_NOTFOUND;
mpp->mp_pi = i;
mpp->mp_page = mp;
if (cursor && cursor_push_page(cursor, mp) == NULL)
- return MDB_FAIL;
+ return ENOMEM;
if (modify) {
MDB_dhead *dh = ((MDB_dhead *)mp)-1;
if (!IS_LEAF(mp)) {
DPRINTF("internal error, index points to a %02X page!?",
mp->mp_flags);
- return MDB_FAIL;
+ return MDB_CORRUPTED;
}
DPRINTF("found leaf page %lu for key %.*s", mp->mp_pgno,
}
if ((mpp->mp_page = mdb_get_page(txn, root)) == NULL)
- return MDB_FAIL;
+ return MDB_PAGE_NOTFOUND;
DPRINTF("root page has flags 0x%X", mpp->mp_page->mp_flags);
memcpy(&pgno, NODEDATA(leaf), sizeof(pgno));
if ((omp = mdb_get_page(txn, pgno)) == NULL) {
DPRINTF("read overflow page %lu failed", pgno);
- return MDB_FAIL;
+ return MDB_PAGE_NOTFOUND;
}
data->mv_data = omp;
indx = NODEPTR(parent->mp_page, parent->mp_ki);
if ((mp = mdb_get_page(cursor->mc_txn, indx->mn_pgno)) == NULL)
- return MDB_FAIL;
+ return MDB_PAGE_NOTFOUND;
#if 0
mp->parent = parent->mp_page;
mp->parent_index = parent->mp_ki;
cursor->mc_eof = 0;
if (data) {
- if ((rc = mdb_read_data(cursor->mc_txn, leaf, data)) != MDB_SUCCESS)
+ MDB_val d2;
+ if ((rc = mdb_read_data(cursor->mc_txn, leaf, &d2)) != MDB_SUCCESS)
return rc;
if (cursor->mc_txn->mt_dbs[cursor->mc_dbi].md_flags & MDB_DUPSORT) {
if (rc != MDB_SUCCESS)
return rc;
}
+ } else {
+ *data = d2;
}
+
}
rc = mdb_set_key(leaf, key);
data->mv_size);
node_size += sizeof(pgno_t);
if ((ofp = mdb_new_page(txn, dbi, P_OVERFLOW, ovpages)) == NULL)
- return MDB_FAIL;
+ return ENOMEM;
DPRINTF("allocated overflow page %lu", ofp->p.mp_pgno);
flags |= F_BIGDATA;
} else {
while(!CURSOR_EMPTY(cursor))
cursor_pop_page(cursor);
if (cursor->mc_txn->mt_dbs[cursor->mc_dbi].md_flags & MDB_DUPSORT) {
- mdb_xcursor_fini(cursor->mc_txn, cursor->mc_dbi, cursor->mc_xcursor);
while(!CURSOR_EMPTY(&cursor->mc_xcursor->mx_cursor))
cursor_pop_page(&cursor->mc_xcursor->mx_cursor);
}
DPRINTF("collapsing root page!");
txn->mt_dbs[dbi].md_root = NODEPGNO(NODEPTR(mpp->mp_page, 0));
if ((root = mdb_get_page(txn, txn->mt_dbs[dbi].md_root)) == NULL)
- return MDB_FAIL;
+ return MDB_PAGE_NOTFOUND;
txn->mt_dbs[dbi].md_depth--;
txn->mt_dbs[dbi].md_branch_pages--;
} else
DPRINTF("reading right neighbor");
node = NODEPTR(mpp->mp_parent, mpp->mp_pi + 1);
if ((npp.mp_page = mdb_get_page(txn, NODEPGNO(node))) == NULL)
- return MDB_FAIL;
+ return MDB_PAGE_NOTFOUND;
npp.mp_pi = mpp->mp_pi + 1;
si = 0;
di = NUMKEYS(mpp->mp_page);
DPRINTF("reading left neighbor");
node = NODEPTR(mpp->mp_parent, mpp->mp_pi - 1);
if ((npp.mp_page = mdb_get_page(txn, NODEPGNO(node))) == NULL)
- return MDB_FAIL;
+ return MDB_PAGE_NOTFOUND;
npp.mp_pi = mpp->mp_pi - 1;
si = NUMKEYS(npp.mp_page) - 1;
di = 0;
if (mdp->h.md_parent == NULL) {
if ((pdp = mdb_new_page(txn, dbi, P_BRANCH, 1)) == NULL)
- return MDB_FAIL;
+ return ENOMEM;
mdp->h.md_pi = 0;
mdp->h.md_parent = &pdp->p;
txn->mt_dbs[dbi].md_root = pdp->p.mp_pgno;
txn->mt_dbs[dbi].md_depth++;
/* Add left (implicit) pointer. */
- if (mdb_add_node(txn, dbi, &pdp->p, 0, NULL, NULL,
- mdp->p.mp_pgno, 0) != MDB_SUCCESS)
- return MDB_FAIL;
+ if ((rc = mdb_add_node(txn, dbi, &pdp->p, 0, NULL, NULL,
+ mdp->p.mp_pgno, 0)) != MDB_SUCCESS)
+ return rc;
} else {
DPRINTF("parent branch page is %lu", mdp->h.md_parent->mp_pgno);
}
/* Create a right sibling. */
if ((rdp = mdb_new_page(txn, dbi, mdp->p.mp_flags, 1)) == NULL)
- return MDB_FAIL;
+ return ENOMEM;
rdp->h.md_parent = mdp->h.md_parent;
rdp->h.md_pi = mdp->h.md_pi + 1;
DPRINTF("new right sibling: page %lu", rdp->p.mp_pgno);
/* Move half of the keys to the right sibling. */
if ((copy = malloc(txn->mt_env->me_psize)) == NULL)
- return MDB_FAIL;
+ return ENOMEM;
memcpy(copy, &mdp->p, txn->mt_env->me_psize);
memset(&mdp->p.mp_ptrs, 0, txn->mt_env->me_psize - PAGEHDRSZ);
mdp->p.mp_lower = PAGEHDRSZ;
}
if (rc != MDB_SUCCESS) {
free(copy);
- return MDB_FAIL;
+ return rc;
}
for (i = j = 0; i <= NUMKEYS(copy); j++) {
mpp.mp_page = &dp->p;
txn->mt_dbs[dbi].md_root = mpp.mp_page->mp_pgno;
txn->mt_dbs[dbi].md_depth++;
+ txn->mt_dbxs[dbi].md_dirty = 1;
ki = 0;
}
else
return mdb_put0(txn, dbi, key, data, flags);
}
+/* Set or clear runtime-changeable environment flags.
+ * Only bits in CHANGEABLE (currently just MDB_NOSYNC) may be altered
+ * after the environment exists; any other bit in `flag` -> EINVAL.
+ * onoff nonzero sets the flag(s), zero clears them.
+ */
+int
+mdb_env_set_flags(MDB_env *env, unsigned int flag, int onoff)
+{
+#define CHANGEABLE (MDB_NOSYNC)
+	if ((flag & CHANGEABLE) != flag)
+		return EINVAL;
+	if (onoff)
+		env->me_flags |= flag;
+	else
+		env->me_flags &= ~flag;
+	return MDB_SUCCESS;
+}
+
int
mdb_env_get_flags(MDB_env *env, unsigned int *arg)
{
txn->mt_dbxs[txn->mt_numdbs].md_dirty = dirty;
memcpy(&txn->mt_dbs[txn->mt_numdbs], data.mv_data, sizeof(MDB_db));
*dbi = txn->mt_numdbs;
+ txn->mt_env->me_dbs[0][txn->mt_numdbs] = txn->mt_dbs[txn->mt_numdbs];
+ txn->mt_env->me_dbs[1][txn->mt_numdbs] = txn->mt_dbs[txn->mt_numdbs];
txn->mt_numdbs++;
}