/* Shorthand for unsigned long, used for txn ids and page statistics. */
#define ULONG unsigned long

/* Debug tracing: with DEBUG set under GCC, DPRINTF prints to stderr with a
 * function:line prefix; otherwise it expands to a no-op.
 * NOTE(review): the matching #else/#endif lines are elided in this excerpt. */
#if (DEBUG +0) && defined(__GNUC__)
# define DPRINTF(fmt, ...) \
	fprintf(stderr, "%s:%d: " fmt "\n", __func__, __LINE__, ##__VA_ARGS__)
# define DPRINTF(...) ((void) 0)

/* Magic number identifying an MDB data file / lock region. */
#define MDB_MAGIC 0xBEEFC0DE
/* Longest accepted key, in bytes (enforced in mdb_get and mdb_cursor_get). */
#define MAXKEYSIZE 255
/* Sentinel page number meaning "no page" (e.g. the root of an empty tree). */
#define P_INVALID (~0UL)
/* True iff all of the flag bits f are set in word w. */
#define F_ISSET(w, f) (((w) & (f)) == (f))
/* 16-bit type for offsets and counts within a page. */
typedef uint16_t indx_t;

/* Default reader-table slot count and default mmap size (1MB). */
#define DEFAULT_READERS 126
#define DEFAULT_MAPSIZE 1048576
58 /* Lock descriptor stuff */
63 typedef struct MDB_rxbody {
68 #define CACHELINE 64 /* most CPUs. Itanium uses 128 */
71 typedef struct MDB_reader {
73 /* cache line alignment */
74 char pad[CACHELINE-sizeof(MDB_rxbody)];
79 uint32_t mt_version; \
80 pthread_mutex_t mt_mutex; \
82 uint32_t mt_numreaders
83 typedef struct MDB_txbody {
87 typedef struct MDB_txninfo {
89 char pad[CACHELINE-sizeof(MDB_txbody)];
90 pthread_mutex_t mt_wmutex;
91 char pad2[CACHELINE-sizeof(pthread_mutex_t)];
92 MDB_reader mt_readers[1];
95 /* Common header for all page types. Overflow pages
96 * occupy a number of contiguous pages with no
97 * headers on any page after the first.
99 typedef struct MDB_page { /* represents a page of storage */
100 pgno_t mp_pgno; /* page number */
101 #define P_BRANCH 0x01 /* branch page */
102 #define P_LEAF 0x02 /* leaf page */
103 #define P_OVERFLOW 0x04 /* overflow page */
104 #define P_META 0x08 /* meta page */
105 #define P_DIRTY 0x10 /* dirty page */
107 #define mp_lower mp_pb.pb.pb_lower
108 #define mp_upper mp_pb.pb.pb_upper
109 #define mp_pages mp_pb.pb_pages
112 indx_t pb_lower; /* lower bound of free space */
113 indx_t pb_upper; /* upper bound of free space */
115 uint32_t pb_pages; /* number of overflow pages */
117 indx_t mp_ptrs[1]; /* dynamic size */
/* Size of the MDB_page header, up to the start of the mp_ptrs array. */
#define PAGEHDRSZ ((unsigned) offsetof(MDB_page, mp_ptrs))

/* Number of nodes on a page: each mp_ptrs entry is a 2-byte indx_t, so
 * the byte span between the header and mp_lower is halved. */
#define NUMKEYS(p) (((p)->mp_lower - PAGEHDRSZ) >> 1)
/* Free bytes remaining between the pointer array and the node heap. */
#define SIZELEFT(p) (indx_t)((p)->mp_upper - (p)->mp_lower)
/* Page fill factor in per-mille (0..1000). */
#define PAGEFILL(env, p) (1000L * ((env)->me_meta.mm_psize - PAGEHDRSZ - SIZELEFT(p)) / \
	((env)->me_meta.mm_psize - PAGEHDRSZ))
/* Page-type predicates over mp_flags. */
#define IS_LEAF(p) F_ISSET((p)->mp_flags, P_LEAF)
#define IS_BRANCH(p) F_ISSET((p)->mp_flags, P_BRANCH)
#define IS_OVERFLOW(p) F_ISSET((p)->mp_flags, P_OVERFLOW)
/* Number of pages needed to hold an overflow item of `size` bytes with a
 * page size of `psize` (the first page also carries the page header).
 *
 * Fixed two macro hazards in the original:
 *  - a trailing `;` in the expansion, which broke any use of OVPAGES
 *    inside an expression, initializer, or condition;
 *  - missing parentheses around the arguments and the whole expansion,
 *    so e.g. `2 * OVPAGES(s, p)` parsed as `(2 * (...)) / p`.
 */
#define OVPAGES(size, psize) ((PAGEHDRSZ + (size) + (psize) - 1) / (psize))
132 typedef struct MDB_meta { /* meta (footer) page content */
135 void *mm_address; /* address for fixed mapping */
136 size_t mm_mapsize; /* size of mmap region */
137 pgno_t mm_last_pg; /* last used page in file */
138 ULONG mm_txnid; /* txnid that committed this page */
142 ULONG mm_branch_pages;
144 ULONG mm_overflow_pages;
149 typedef struct MDB_dhead { /* a dirty page */
150 SIMPLEQ_ENTRY(MDB_dpage) md_next; /* queue of dirty pages */
152 unsigned md_pi; /* parent index */
156 typedef struct MDB_dpage {
161 SIMPLEQ_HEAD(dirty_queue, MDB_dpage);
163 typedef struct MDB_oldpages {
164 struct MDB_oldpages *mo_next;
166 pgno_t mo_pages[1]; /* dynamic */
169 typedef struct MDB_pageparent {
175 static MDB_dpage *mdb_alloc_page(MDB_txn *txn, MDB_page *parent, unsigned int parent_idx, int num);
176 static int mdb_touch(MDB_txn *txn, MDB_pageparent *mp);
178 typedef struct MDB_ppage { /* ordered list of pages */
179 SLIST_ENTRY(MDB_ppage) mp_entry;
181 unsigned int mp_ki; /* cursor index on page */
183 SLIST_HEAD(page_stack, MDB_ppage);
185 #define CURSOR_EMPTY(c) SLIST_EMPTY(&(c)->mc_stack)
186 #define CURSOR_TOP(c) SLIST_FIRST(&(c)->mc_stack)
187 #define CURSOR_POP(c) SLIST_REMOVE_HEAD(&(c)->mc_stack, mp_entry)
188 #define CURSOR_PUSH(c,p) SLIST_INSERT_HEAD(&(c)->mc_stack, p, mp_entry)
192 struct page_stack mc_stack; /* stack of parent pages */
194 short mc_initialized; /* 1 if initialized */
195 short mc_eof; /* 1 if end is reached */
198 #define METAHASHLEN offsetof(MDB_meta, mm_hash)
199 #define METADATA(p) ((void *)((char *)p + PAGEHDRSZ))
201 typedef struct MDB_node {
202 #define mn_pgno mn_p.np_pgno
203 #define mn_dsize mn_p.np_dsize
205 pgno_t np_pgno; /* child page number */
206 uint32_t np_dsize; /* leaf data size */
208 unsigned int mn_flags:4;
209 unsigned int mn_ksize:12; /* key size */
210 #define F_BIGDATA 0x01 /* data put on overflow page */
214 typedef struct MDB_dbx {
216 MDB_cmp_func *md_cmp; /* user compare function */
217 MDB_rel_func *md_rel; /* user relocate function */
220 typedef struct MDB_db {
224 ULONG md_branch_pages;
226 ULONG md_overflow_pages;
232 pgno_t mt_root; /* current / new root page */
233 pgno_t mt_next_pgno; /* next unallocated page */
237 pgno_t *mt_free_pgs; /* this is an IDL */
239 struct dirty_queue *dirty_queue; /* modified pages */
242 MDB_dbx *mt_dbxs; /* array */
243 MDB_db **mt_dbs; /* array of ptrs */
244 unsigned int mt_numdbs;
246 #define MDB_TXN_RDONLY 0x01 /* read-only transaction */
247 #define MDB_TXN_ERROR 0x02 /* an error has occurred */
248 #define MDB_TXN_METOGGLE 0x04 /* used meta page 1 */
249 unsigned int mt_flags;
256 unsigned int me_maxreaders;
259 MDB_txninfo *me_txns;
261 MDB_txn *me_txn; /* current write transaction */
263 off_t me_size; /* current file size */
264 pthread_key_t me_txkey; /* thread-key for readers */
265 MDB_oldpages *me_pghead;
266 MDB_oldpages *me_pgtail;
267 MDB_dbx *me_dbxs; /* array */
268 MDB_db **me_dbs; /* array of ptrs */
269 unsigned int me_numdbs;
272 #define NODESIZE offsetof(MDB_node, mn_data)
274 #define INDXSIZE(k) (NODESIZE + ((k) == NULL ? 0 : (k)->mv_size))
275 #define LEAFSIZE(k, d) (NODESIZE + (k)->mv_size + (d)->mv_size)
276 #define NODEPTR(p, i) ((MDB_node *)((char *)(p) + (p)->mp_ptrs[i]))
277 #define NODEKEY(node) (void *)((node)->mn_data)
278 #define NODEDATA(node) (void *)((char *)(node)->mn_data + (node)->mn_ksize)
279 #define NODEPGNO(node) ((node)->mn_pgno)
280 #define NODEDSZ(node) ((node)->mn_dsize)
282 #define MDB_COMMIT_PAGES 64 /* max number of pages to write in one commit */
283 #define MDB_MAXCACHE_DEF 1024 /* max number of pages to keep in cache */
285 static int mdb_search_page_root(MDB_txn *txn,
286 MDB_dbi dbi, MDB_val *key,
287 MDB_cursor *cursor, int modify,
288 MDB_pageparent *mpp);
289 static int mdb_search_page(MDB_txn *txn,
290 MDB_dbi dbi, MDB_val *key,
291 MDB_cursor *cursor, int modify,
292 MDB_pageparent *mpp);
294 static int mdbenv_read_header(MDB_env *env);
295 static int mdb_check_meta_page(MDB_page *p);
296 static int mdbenv_read_meta(MDB_env *env, int *which);
297 static int mdbenv_write_meta(MDB_txn *txn);
298 static MDB_page *mdbenv_get_page(MDB_env *env, pgno_t pgno);
300 static MDB_node *mdb_search_node(MDB_txn *txn, MDB_dbi dbi, MDB_page *mp,
301 MDB_val *key, int *exactp, unsigned int *kip);
302 static int mdb_add_node(MDB_txn *txn, MDB_dbi dbi, MDB_page *mp,
303 indx_t indx, MDB_val *key, MDB_val *data,
304 pgno_t pgno, uint8_t flags);
305 static void mdb_del_node(MDB_page *mp, indx_t indx);
306 static int mdb_read_data(MDB_env *env, MDB_node *leaf, MDB_val *data);
308 static int mdb_rebalance(MDB_txn *txn, MDB_dbi dbi, MDB_pageparent *mp);
309 static int mdb_update_key(MDB_page *mp, indx_t indx, MDB_val *key);
310 static int mdb_move_node(MDB_txn *txn, MDB_dbi dbi,
311 MDB_pageparent *src, indx_t srcindx,
312 MDB_pageparent *dst, indx_t dstindx);
313 static int mdb_merge(MDB_txn *txn, MDB_dbi dbi, MDB_pageparent *src,
314 MDB_pageparent *dst);
315 static int mdb_split(MDB_txn *txn, MDB_dbi dbi, MDB_page **mpp,
316 unsigned int *newindxp, MDB_val *newkey,
317 MDB_val *newdata, pgno_t newpgno);
318 static MDB_dpage *mdb_new_page(MDB_txn *txn, MDB_dbi dbi, uint32_t flags, int num);
320 static void cursor_pop_page(MDB_cursor *cursor);
321 static MDB_ppage *cursor_push_page(MDB_cursor *cursor,
324 static int mdb_set_key(MDB_node *node, MDB_val *key);
325 static int mdb_sibling(MDB_cursor *cursor, int move_right);
326 static int mdb_cursor_next(MDB_cursor *cursor,
327 MDB_val *key, MDB_val *data);
328 static int mdb_cursor_set(MDB_cursor *cursor,
329 MDB_val *key, MDB_val *data, int *exactp);
330 static int mdb_cursor_first(MDB_cursor *cursor,
331 MDB_val *key, MDB_val *data);
333 static size_t mdb_leaf_size(MDB_env *env, MDB_val *key,
335 static size_t mdb_branch_size(MDB_env *env, MDB_val *key);
337 static int memncmp(const void *s1, size_t n1,
338 const void *s2, size_t n2);
339 static int memnrcmp(const void *s1, size_t n1,
340 const void *s2, size_t n2);
/* Lexicographic comparison of two length-delimited byte strings.
 * Returns <0, 0 or >0 like memcmp, using string length as the
 * tie-breaker when one string is a prefix of the other. */
memncmp(const void *s1, size_t n1, const void *s2, size_t n2)
	int diff, len_diff = -1;

	/* Tie-breaker when the compared prefixes are equal.
	 * NOTE(review): (n1 > n2) yields 0, not -1, when n1 <= n2; the
	 * elided lines presumably guard this assignment -- verify. */
	len_diff = (n1 > n2);
	/* NOTE(review): as visible, memcmp reads n1 bytes of s2; an elided
	 * line presumably clamps n1 to min(n1, n2) first -- verify. */
	diff = memcmp(s1, s2, n1);
	return diff ? diff : len_diff;
/* Byte-wise comparison of two strings scanned from their last byte
 * backwards; used for databases opened with MDB_REVERSEKEY. */
memnrcmp(const void *s1, size_t n1, const void *s2, size_t n2)
	const unsigned char *p1, *p2, *p1_lim;

	/* Start at the final byte of each string. */
	p1 = (const unsigned char *)s1 + n1 - 1;
	p2 = (const unsigned char *)s2 + n2 - 1;

	/* Walk backwards while bytes match; p1_lim bounds the shorter side.
	 * NOTE(review): the loop body and its bounds check are elided in
	 * this excerpt -- the visible condition alone would run past the
	 * start of the buffers; verify against the full source. */
	for (p1_lim = (n1 <= n2 ? s1 : s2); *p1 == *p2; p1--, p2--) {
	return (p1 != s1) ? (p1 != p2) : (p2 != s2) ? -1 : 0;
/* Compare two keys with the database's user-supplied comparator.
 * NOTE(review): assumes md_cmp is non-NULL; mdb_search_node checks
 * md_cmp and falls back to _mdb_cmp when it is unset. */
mdb_cmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *a, const MDB_val *b)
	return txn->mt_dbxs[dbi].md_cmp(a, b);
/* Default key comparator: reversed byte order for MDB_REVERSEKEY
 * databases, plain lexicographic byte order otherwise. */
_mdb_cmp(MDB_txn *txn, MDB_dbi dbi, const MDB_val *key1, const MDB_val *key2)
	if (F_ISSET(txn->mt_dbs[dbi]->md_flags, MDB_REVERSEKEY))
		return memnrcmp(key1->mv_data, key1->mv_size, key2->mv_data, key2->mv_size);

	return memncmp((char *)key1->mv_data, key1->mv_size, key2->mv_data, key2->mv_size);
390 /* Allocate new page(s) for writing */
392 mdb_alloc_page(MDB_txn *txn, MDB_page *parent, unsigned int parent_idx, int num)
395 pgno_t pgno = P_INVALID;
397 if (txn->mt_env->me_pghead) {
398 ULONG oldest = txn->mt_txnid - 2;
400 for (i=0; i<txn->mt_env->me_txns->mt_numreaders; i++) {
401 if (txn->mt_env->me_txns->mt_readers[i].mr_txnid < oldest)
402 oldest = txn->mt_env->me_txns->mt_readers[i].mr_txnid;
404 if (oldest > txn->mt_env->me_pghead->mo_txnid) {
405 MDB_oldpages *mop = txn->mt_env->me_pghead;
406 txn->mt_oldest = oldest;
408 /* FIXME: For now, always use fresh pages. We
409 * really ought to search the free list for a
414 /* peel pages off tail, so we only have to truncate the list */
415 pgno = MDB_IDL_LAST(mop->mo_pages);
416 if (MDB_IDL_IS_RANGE(mop->mo_pages)) {
418 if (mop->mo_pages[2] > mop->mo_pages[1])
419 mop->mo_pages[0] = 0;
423 if (MDB_IDL_IS_ZERO(mop->mo_pages)) {
424 txn->mt_env->me_pghead = mop->mo_next;
425 if (!txn->mt_env->me_pghead)
426 txn->mt_env->me_pgtail = NULL;
433 if ((dp = malloc(txn->mt_env->me_meta.mm_psize * num + sizeof(MDB_dhead))) == NULL)
436 dp->h.md_parent = parent;
437 dp->h.md_pi = parent_idx;
438 SIMPLEQ_INSERT_TAIL(txn->mt_u.dirty_queue, dp, h.md_next);
439 if (pgno == P_INVALID) {
440 dp->p.mp_pgno = txn->mt_next_pgno;
441 txn->mt_next_pgno += num;
443 dp->p.mp_pgno = pgno;
449 /* Touch a page: make it dirty and re-insert into tree with updated pgno.
452 mdb_touch(MDB_txn *txn, MDB_pageparent *pp)
454 MDB_page *mp = pp->mp_page;
459 if (!F_ISSET(mp->mp_flags, P_DIRTY)) {
461 if ((dp = mdb_alloc_page(txn, pp->mp_parent, pp->mp_pi, 1)) == NULL)
463 DPRINTF("touched page %lu -> %lu", mp->mp_pgno, dp->p.mp_pgno);
464 mdb_idl_insert(txn->mt_free_pgs, mp->mp_pgno);
465 pgno = dp->p.mp_pgno;
466 memcpy(&dp->p, mp, txn->mt_env->me_meta.mm_psize);
469 mp->mp_flags |= P_DIRTY;
471 /* Update the page number to new touched page. */
472 if (pp->mp_parent != NULL)
473 NODEPGNO(NODEPTR(pp->mp_parent, pp->mp_pi)) = mp->mp_pgno;
/* Flush the data file to stable storage unless the environment was
 * opened with MDB_NOSYNC. (Return-value handling is elided here.) */
mdbenv_sync(MDB_env *env)
	if (!F_ISSET(env->me_flags, MDB_NOSYNC)) {
		if (fsync(env->me_fd))
490 #define DBX_CHUNK 16 /* space for 16 DBs at a time */
/* Begin a new transaction on `env`, returning it in *ret.
 * rdonly transactions claim a slot in the shared reader table; write
 * transactions take the single writer mutex and set up dirty-page and
 * free-page bookkeeping. (Several error paths and the branch structure
 * between the read-only and write paths are elided in this excerpt.) */
mdb_txn_begin(MDB_env *env, int rdonly, MDB_txn **ret)
	if ((txn = calloc(1, sizeof(*txn))) == NULL) {
		DPRINTF("calloc: %s", strerror(errno));
	txn->mt_flags |= MDB_TXN_RDONLY;

	txn->mt_u.dirty_queue = calloc(1, sizeof(*txn->mt_u.dirty_queue));
	if (txn->mt_u.dirty_queue == NULL) {
	SIMPLEQ_INIT(txn->mt_u.dirty_queue);

	/* Writer path: serialize on the shared write mutex and advance the
	 * environment-wide transaction id. */
	pthread_mutex_lock(&env->me_txns->mt_wmutex);
	env->me_txns->mt_txnid++;
	txn->mt_free_pgs = malloc(MDB_IDL_UM_SIZEOF);
	if (txn->mt_free_pgs == NULL) {
		free(txn->mt_u.dirty_queue);
		/* NOTE(review): mt_wmutex is held at this point; confirm the
		 * elided error path unlocks it before returning. */
	txn->mt_free_pgs[0] = 0;

	/* Copy the DB arrays */
	txn->mt_numdbs = env->me_numdbs;
	/* rc is reused here as a DBX_CHUNK chunk count for the copies below.
	 * NOTE(review): `%` yields too few chunks once mt_numdbs reaches
	 * 2*DBX_CHUNK (e.g. 32 dbs -> 1 chunk of 16) -- this looks like it
	 * should be `/`; verify against the full source. */
	rc = (txn->mt_numdbs % DBX_CHUNK) + 1;
	/* NOTE(review): both mallocs are unchecked before the memcpys. */
	txn->mt_dbxs = malloc(rc * DBX_CHUNK * sizeof(MDB_dbx));
	txn->mt_dbs = malloc(rc * DBX_CHUNK * sizeof(MDB_db *));
	memcpy(txn->mt_dbxs, env->me_dbxs, txn->mt_numdbs * sizeof(MDB_dbx));
	memcpy(txn->mt_dbs, env->me_dbs, txn->mt_numdbs * sizeof(MDB_db *));

	txn->mt_txnid = env->me_txns->mt_txnid;

	/* Reader path: reuse this thread's reader slot if it already has
	 * one, otherwise claim a free slot under the reader-table mutex. */
	MDB_reader *r = pthread_getspecific(env->me_txkey);
	pthread_mutex_lock(&env->me_txns->mt_mutex);
	for (i=0; i<env->me_maxreaders; i++) {
		if (env->me_txns->mt_readers[i].mr_pid == 0) {
			env->me_txns->mt_readers[i].mr_pid = getpid();
			env->me_txns->mt_readers[i].mr_tid = pthread_self();
			r = &env->me_txns->mt_readers[i];
			pthread_setspecific(env->me_txkey, r);
			/* Grow the published reader count to cover slot i. */
			if (i >= env->me_txns->mt_numreaders)
				env->me_txns->mt_numreaders = i+1;
	pthread_mutex_unlock(&env->me_txns->mt_mutex);
	if (i == env->me_maxreaders) {
	/* Publish our txnid so writers can compute the oldest live reader. */
	r->mr_txnid = txn->mt_txnid;
	txn->mt_u.reader = r;

	if ((rc = mdbenv_read_meta(env, &toggle)) != MDB_SUCCESS) {
	txn->mt_flags |= MDB_TXN_METOGGLE;

	txn->mt_next_pgno = env->me_meta.mm_last_pg+1;
	txn->mt_root = env->me_meta.mm_root;
	DPRINTF("begin transaction %lu on mdbenv %p, root page %lu",
		txn->mt_txnid, (void *) env, txn->mt_root);
579 mdb_txn_abort(MDB_txn *txn)
588 DPRINTF("abort transaction %lu on mdbenv %p, root page %lu",
589 txn->mt_txnid, (void *) env, txn->mt_root);
594 if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) {
595 txn->mt_u.reader->mr_txnid = 0;
597 /* Discard all dirty pages. Return any re-used pages
600 MDB_IDL_ZERO(txn->mt_free_pgs);
601 while (!SIMPLEQ_EMPTY(txn->mt_u.dirty_queue)) {
602 dp = SIMPLEQ_FIRST(txn->mt_u.dirty_queue);
603 SIMPLEQ_REMOVE_HEAD(txn->mt_u.dirty_queue, h.md_next);
604 if (dp->p.mp_pgno <= env->me_meta.mm_last_pg)
605 mdb_idl_insert(txn->mt_free_pgs, dp->p.mp_pgno);
608 /* put back to head of free list */
609 if (!MDB_IDL_IS_ZERO(txn->mt_free_pgs)) {
612 mop = malloc(sizeof(MDB_oldpages) + MDB_IDL_SIZEOF(txn->mt_free_pgs) - sizeof(pgno_t));
613 mop->mo_next = env->me_pghead;
614 mop->mo_txnid = txn->mt_oldest - 1;
615 if (!env->me_pghead) {
616 env->me_pgtail = mop;
618 env->me_pghead = mop;
619 memcpy(mop->mo_pages, txn->mt_free_pgs, MDB_IDL_SIZEOF(txn->mt_free_pgs));
622 free(txn->mt_free_pgs);
623 free(txn->mt_u.dirty_queue);
625 env->me_txns->mt_txnid--;
626 pthread_mutex_unlock(&env->me_txns->mt_wmutex);
633 mdb_txn_commit(MDB_txn *txn)
641 struct iovec iov[MDB_COMMIT_PAGES];
644 assert(txn->mt_env != NULL);
648 if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) {
649 DPRINTF("attempt to commit read-only transaction");
654 if (txn != env->me_txn) {
655 DPRINTF("attempt to commit unknown transaction");
660 if (F_ISSET(txn->mt_flags, MDB_TXN_ERROR)) {
661 DPRINTF("error flag is set, can't commit");
666 if (SIMPLEQ_EMPTY(txn->mt_u.dirty_queue))
669 DPRINTF("committing transaction %lu on mdbenv %p, root page %lu",
670 txn->mt_txnid, (void *) env, txn->mt_root);
672 /* Commit up to MDB_COMMIT_PAGES dirty pages to disk until done.
679 SIMPLEQ_FOREACH(dp, txn->mt_u.dirty_queue, h.md_next) {
680 if (dp->p.mp_pgno != next) {
682 DPRINTF("committing %u dirty pages", n);
683 rc = writev(env->me_fd, iov, n);
687 DPRINTF("short write, filesystem full?");
689 DPRINTF("writev: %s", strerror(errno));
696 lseek(env->me_fd, dp->p.mp_pgno * env->me_meta.mm_psize, SEEK_SET);
697 next = dp->p.mp_pgno;
699 DPRINTF("committing page %lu", dp->p.mp_pgno);
700 iov[n].iov_len = env->me_meta.mm_psize * dp->h.md_num;
701 iov[n].iov_base = &dp->p;
702 size += iov[n].iov_len;
703 next = dp->p.mp_pgno + dp->h.md_num;
704 /* clear dirty flag */
705 dp->p.mp_flags &= ~P_DIRTY;
706 if (++n >= MDB_COMMIT_PAGES) {
715 DPRINTF("committing %u dirty pages", n);
716 rc = writev(env->me_fd, iov, n);
720 DPRINTF("short write, filesystem full?");
722 DPRINTF("writev: %s", strerror(errno));
729 /* Drop the dirty pages.
731 while (!SIMPLEQ_EMPTY(txn->mt_u.dirty_queue)) {
732 dp = SIMPLEQ_FIRST(txn->mt_u.dirty_queue);
733 SIMPLEQ_REMOVE_HEAD(txn->mt_u.dirty_queue, h.md_next);
737 if ((n = mdbenv_sync(env)) != 0 ||
738 (n = mdbenv_write_meta(txn)) != MDB_SUCCESS ||
739 (n = mdbenv_sync(env)) != 0) {
746 MDB_dbx *p1 = env->me_dbxs;
747 MDB_db **p2 = env->me_dbs;
749 env->me_dbxs = txn->mt_dbxs;
750 env->me_dbs = txn->mt_dbs;
756 /* add to tail of free list */
757 if (!MDB_IDL_IS_ZERO(txn->mt_free_pgs)) {
760 mop = malloc(sizeof(MDB_oldpages) + MDB_IDL_SIZEOF(txn->mt_free_pgs) - sizeof(pgno_t));
762 if (env->me_pghead) {
763 env->me_pgtail->mo_next = mop;
765 env->me_pghead = mop;
767 env->me_pgtail = mop;
768 memcpy(mop->mo_pages, txn->mt_free_pgs, MDB_IDL_SIZEOF(txn->mt_free_pgs));
769 mop->mo_txnid = txn->mt_txnid;
772 pthread_mutex_unlock(&env->me_txns->mt_wmutex);
773 free(txn->mt_free_pgs);
774 free(txn->mt_u.dirty_queue);
785 mdbenv_read_header(MDB_env *env)
794 /* We don't know the page size yet, so use a minimum value.
797 if ((rc = pread(env->me_fd, page, PAGESIZE, 0)) == 0) {
799 } else if (rc != PAGESIZE) {
802 DPRINTF("read: %s", strerror(errno));
806 p = (MDB_page *)page;
808 if (!F_ISSET(p->mp_flags, P_META)) {
809 DPRINTF("page %lu not a meta page", p->mp_pgno);
814 if (m->mm_magic != MDB_MAGIC) {
815 DPRINTF("meta has invalid magic");
819 if (m->mm_version != MDB_VERSION) {
820 DPRINTF("database is version %u, expected version %u",
821 m->mm_version, MDB_VERSION);
825 memcpy(&env->me_meta, m, sizeof(*m));
830 mdbenv_init_meta(MDB_env *env)
837 DPRINTF("writing new meta page");
838 psize = sysconf(_SC_PAGE_SIZE);
840 env->me_meta.mm_magic = MDB_MAGIC;
841 env->me_meta.mm_version = MDB_VERSION;
842 env->me_meta.mm_psize = psize;
843 env->me_meta.mm_flags = env->me_flags & 0xffff;
844 env->me_meta.mm_root = P_INVALID;
845 env->me_meta.mm_last_pg = 1;
847 p = calloc(2, psize);
849 p->mp_flags = P_META;
852 memcpy(meta, &env->me_meta, sizeof(*meta));
854 q = (MDB_page *)((char *)p + psize);
857 q->mp_flags = P_META;
860 memcpy(meta, &env->me_meta, sizeof(*meta));
862 rc = write(env->me_fd, p, psize * 2);
864 return (rc == (int)psize * 2) ? MDB_SUCCESS : errno;
868 mdbenv_write_meta(MDB_txn *txn)
877 assert(txn->mt_env != NULL);
879 DPRINTF("writing meta page for root page %lu", txn->mt_root);
884 off = offsetof(MDB_meta, mm_depth);
885 len = sizeof(MDB_meta) - off;
888 meta.mm_depth = txn->mt_dbs[0]->md_depth;
889 meta.mm_branch_pages = txn->mt_dbs[0]->md_branch_pages;
890 meta.mm_leaf_pages = txn->mt_dbs[0]->md_leaf_pages;
891 meta.mm_overflow_pages = txn->mt_dbs[0]->md_overflow_pages;
892 meta.mm_entries = txn->mt_dbs[0]->md_entries;
893 meta.mm_root = txn->mt_root;
894 meta.mm_last_pg = txn->mt_next_pgno - 1;
895 meta.mm_txnid = txn->mt_txnid;
897 if (!F_ISSET(txn->mt_flags, MDB_TXN_METOGGLE))
898 off += env->me_meta.mm_psize;
901 lseek(env->me_fd, off, SEEK_SET);
902 rc = write(env->me_fd, ptr, len);
904 DPRINTF("write failed, disk error?");
911 /* Returns true if page p is a valid meta page, false otherwise.
914 mdb_check_meta_page(MDB_page *p)
916 if (!F_ISSET(p->mp_flags, P_META)) {
917 DPRINTF("page %lu not a meta page", p->mp_pgno);
925 mdbenv_read_meta(MDB_env *env, int *which)
933 if ((mp0 = mdbenv_get_page(env, 0)) == NULL ||
934 (mp1 = mdbenv_get_page(env, 1)) == NULL)
937 rc = mdb_check_meta_page(mp0);
940 rc = mdb_check_meta_page(mp1);
943 meta[0] = METADATA(mp0);
944 meta[1] = METADATA(mp1);
946 if (meta[0]->mm_txnid < meta[1]->mm_txnid)
949 if (meta[toggle]->mm_txnid > env->me_meta.mm_txnid) {
950 memcpy(&env->me_meta, meta[toggle], sizeof(env->me_meta));
955 DPRINTF("Using meta page %d", toggle);
/* Allocate and default-initialize an MDB_env handle.
 * Returns ENOMEM on allocation failure; the declaration of `e` and the
 * assignment to *env are elided in this excerpt. */
mdbenv_create(MDB_env **env)
	e = calloc(1, sizeof(*e));
	if (!e) return ENOMEM;

	e->me_meta.mm_mapsize = DEFAULT_MAPSIZE;
	e->me_maxreaders = DEFAULT_READERS;
/* Set the memory-map size, recorded both on the env handle and in the
 * in-memory meta. (Any guard lines are elided in this excerpt.) */
mdbenv_set_mapsize(MDB_env *env, size_t size)
	env->me_mapsize = env->me_meta.mm_mapsize = size;
/* Configure the reader-table size used when the lock region is created.
 * NOTE(review): no validation of `readers`, and presumably no effect
 * once the lock region has been mapped (must be called before
 * mdbenv_open) -- verify. */
mdbenv_set_maxreaders(MDB_env *env, int readers)
	env->me_maxreaders = readers;
/* Report the configured reader-table size through *readers.
 * Rejects NULL arguments (the error return value is elided here). */
mdbenv_get_maxreaders(MDB_env *env, int *readers)
	if (!env || !readers)
	*readers = env->me_maxreaders;
1002 mdbenv_open2(MDB_env *env, unsigned int flags)
1006 env->me_flags = flags;
1008 if ((i = mdbenv_read_header(env)) != 0) {
1011 DPRINTF("new mdbenv");
1015 if (!env->me_mapsize)
1016 env->me_mapsize = env->me_meta.mm_mapsize;
1019 if (env->me_meta.mm_address && (flags & MDB_FIXEDMAP))
1021 env->me_map = mmap(env->me_meta.mm_address, env->me_mapsize, PROT_READ, i,
1023 if (env->me_map == MAP_FAILED)
1027 env->me_meta.mm_mapsize = env->me_mapsize;
1028 if (flags & MDB_FIXEDMAP)
1029 env->me_meta.mm_address = env->me_map;
1030 i = mdbenv_init_meta(env);
1031 if (i != MDB_SUCCESS) {
1032 munmap(env->me_map, env->me_mapsize);
1037 if ((i = mdbenv_read_meta(env, NULL)) != 0)
1040 DPRINTF("opened database version %u, pagesize %u",
1041 env->me_meta.mm_version, env->me_meta.mm_psize);
1042 DPRINTF("depth: %u", env->me_meta.mm_depth);
1043 DPRINTF("entries: %lu", env->me_meta.mm_entries);
1044 DPRINTF("branch pages: %lu", env->me_meta.mm_branch_pages);
1045 DPRINTF("leaf pages: %lu", env->me_meta.mm_leaf_pages);
1046 DPRINTF("overflow pages: %lu", env->me_meta.mm_overflow_pages);
1047 DPRINTF("root: %lu", env->me_meta.mm_root);
/* Thread-exit destructor for the reader-slot TSD key (me_txkey):
 * marks this thread's reader slot as holding no transaction. */
mdbenv_reader_dest(void *ptr)
	MDB_reader *reader = ptr;

	reader->mr_txnid = 0;
1063 mdbenv_share_locks(MDB_env *env)
1065 struct flock lock_info;
1067 env->me_txns->mt_txnid = env->me_meta.mm_txnid;
1069 memset((void *)&lock_info, 0, sizeof(lock_info));
1070 lock_info.l_type = F_RDLCK;
1071 lock_info.l_whence = SEEK_SET;
1072 lock_info.l_start = 0;
1073 lock_info.l_len = 1;
1074 fcntl(env->me_lfd, F_SETLK, &lock_info);
1078 mdbenv_setup_locks(MDB_env *env, char *lpath, int mode, int *excl)
1082 struct flock lock_info;
1086 if ((env->me_lfd = open(lpath, O_RDWR|O_CREAT, mode)) == -1) {
1090 /* Try to get exclusive lock. If we succeed, then
1091 * nobody is using the lock region and we should initialize it.
1093 memset((void *)&lock_info, 0, sizeof(lock_info));
1094 lock_info.l_type = F_WRLCK;
1095 lock_info.l_whence = SEEK_SET;
1096 lock_info.l_start = 0;
1097 lock_info.l_len = 1;
1098 rc = fcntl(env->me_lfd, F_SETLK, &lock_info);
1102 lock_info.l_type = F_RDLCK;
1103 rc = fcntl(env->me_lfd, F_SETLK, &lock_info);
1109 size = lseek(env->me_lfd, 0, SEEK_END);
1110 rsize = (env->me_maxreaders-1) * sizeof(MDB_reader) + sizeof(MDB_txninfo);
1111 if (size < rsize && *excl) {
1112 if (ftruncate(env->me_lfd, rsize) != 0) {
1118 size = rsize - sizeof(MDB_txninfo);
1119 env->me_maxreaders = size/sizeof(MDB_reader) + 1;
1121 env->me_txns = mmap(0, rsize, PROT_READ|PROT_WRITE, MAP_SHARED,
1123 if (env->me_txns == MAP_FAILED) {
1128 pthread_mutexattr_t mattr;
1130 pthread_mutexattr_init(&mattr);
1131 pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED);
1132 pthread_mutex_init(&env->me_txns->mt_mutex, &mattr);
1133 pthread_mutex_init(&env->me_txns->mt_wmutex, &mattr);
1134 env->me_txns->mt_version = MDB_VERSION;
1135 env->me_txns->mt_magic = MDB_MAGIC;
1136 env->me_txns->mt_txnid = 0;
1137 env->me_txns->mt_numreaders = 0;
1140 if (env->me_txns->mt_magic != MDB_MAGIC) {
1141 DPRINTF("lock region has invalid magic");
1144 if (env->me_txns->mt_version != MDB_VERSION) {
1145 DPRINTF("lock region is version %u, expected version %u",
1146 env->me_txns->mt_version, MDB_VERSION);
1149 if (errno != EACCES && errno != EAGAIN) {
1163 mdbenv_open(MDB_env *env, const char *path, unsigned int flags, mode_t mode)
1165 int oflags, rc, len, excl;
1166 char *lpath, *dpath;
1169 lpath = malloc(len + sizeof("/lock.mdb") + len + sizeof("/data.db"));
1172 dpath = lpath + len + sizeof("/lock.mdb");
1173 sprintf(lpath, "%s/lock.mdb", path);
1174 sprintf(dpath, "%s/data.mdb", path);
1176 rc = mdbenv_setup_locks(env, lpath, mode, &excl);
1180 if (F_ISSET(flags, MDB_RDONLY))
1183 oflags = O_RDWR | O_CREAT;
1185 if ((env->me_fd = open(dpath, oflags, mode)) == -1)
1188 if ((rc = mdbenv_open2(env, flags)) != MDB_SUCCESS) {
1192 env->me_path = strdup(path);
1193 DPRINTF("opened dbenv %p", (void *) env);
1194 pthread_key_create(&env->me_txkey, mdbenv_reader_dest);
1196 mdbenv_share_locks(env);
1197 env->me_dbxs = calloc(DBX_CHUNK, sizeof(MDB_dbx));
1198 env->me_dbs = calloc(DBX_CHUNK, sizeof(MDB_db *));
1200 env->me_dbs[0] = (MDB_db *)&env->me_meta.mm_psize;
1210 mdbenv_close(MDB_env *env)
1218 munmap(env->me_map, env->me_mapsize);
1222 size_t size = (env->me_maxreaders-1) * sizeof(MDB_reader) + sizeof(MDB_txninfo);
1223 munmap(env->me_txns, size);
1229 /* Search for key within a leaf page, using binary search.
1230 * Returns the smallest entry larger or equal to the key.
1231 * If exactp is non-null, stores whether the found entry was an exact match
1232 * in *exactp (1 or 0).
1233 * If kip is non-null, stores the index of the found entry in *kip.
1234 * If no entry larger or equal to the key is found, returns NULL.
1237 mdb_search_node(MDB_txn *txn, MDB_dbi dbi, MDB_page *mp, MDB_val *key,
1238 int *exactp, unsigned int *kip)
1246 DPRINTF("searching %u keys in %s page %lu",
1248 IS_LEAF(mp) ? "leaf" : "branch",
1251 assert(NUMKEYS(mp) > 0);
1253 memset(&nodekey, 0, sizeof(nodekey));
1255 low = IS_LEAF(mp) ? 0 : 1;
1256 high = NUMKEYS(mp) - 1;
1257 while (low <= high) {
1258 i = (low + high) >> 1;
1259 node = NODEPTR(mp, i);
1261 nodekey.mv_size = node->mn_ksize;
1262 nodekey.mv_data = NODEKEY(node);
1264 if (txn->mt_dbxs[dbi].md_cmp)
1265 rc = txn->mt_dbxs[dbi].md_cmp(key, &nodekey);
1267 rc = _mdb_cmp(txn, dbi, key, &nodekey);
1270 DPRINTF("found leaf index %u [%.*s], rc = %i",
1271 i, (int)nodekey.mv_size, (char *)nodekey.mv_data, rc);
1273 DPRINTF("found branch index %u [%.*s -> %lu], rc = %i",
1274 i, (int)node->mn_ksize, (char *)NODEKEY(node),
1285 if (rc > 0) { /* Found entry is less than the key. */
1286 i++; /* Skip to get the smallest entry larger than key. */
1287 if (i >= NUMKEYS(mp))
1288 /* There is no entry larger or equal to the key. */
1292 *exactp = (rc == 0);
1293 if (kip) /* Store the key index if requested. */
1296 return NODEPTR(mp, i);
1300 cursor_pop_page(MDB_cursor *cursor)
1304 top = CURSOR_TOP(cursor);
1307 DPRINTF("popped page %lu off cursor %p", top->mp_page->mp_pgno, (void *) cursor);
1313 cursor_push_page(MDB_cursor *cursor, MDB_page *mp)
1317 DPRINTF("pushing page %lu on cursor %p", mp->mp_pgno, (void *) cursor);
1319 if ((ppage = calloc(1, sizeof(*ppage))) == NULL)
1321 ppage->mp_page = mp;
1322 CURSOR_PUSH(cursor, ppage);
1327 mdbenv_get_page(MDB_env *env, pgno_t pgno)
1330 MDB_txn *txn = env->me_txn;
1333 if (txn && !SIMPLEQ_EMPTY(txn->mt_u.dirty_queue)) {
1335 SIMPLEQ_FOREACH(dp, txn->mt_u.dirty_queue, h.md_next) {
1336 if (dp->p.mp_pgno == pgno) {
1344 p = (MDB_page *)(env->me_map + env->me_meta.mm_psize * pgno);
1350 mdb_search_page_root(MDB_txn *txn, MDB_dbi dbi, MDB_val *key,
1351 MDB_cursor *cursor, int modify, MDB_pageparent *mpp)
1353 MDB_page *mp = mpp->mp_page;
1356 if (cursor && cursor_push_page(cursor, mp) == NULL)
1359 while (IS_BRANCH(mp)) {
1363 DPRINTF("branch page %lu has %u keys", mp->mp_pgno, NUMKEYS(mp));
1364 assert(NUMKEYS(mp) > 1);
1365 DPRINTF("found index 0 to page %lu", NODEPGNO(NODEPTR(mp, 0)));
1367 if (key == NULL) /* Initialize cursor to first page. */
1371 node = mdb_search_node(txn, dbi, mp, key, &exact, &i);
1373 i = NUMKEYS(mp) - 1;
1381 DPRINTF("following index %u for key %.*s",
1382 i, (int)key->mv_size, (char *)key->mv_data);
1383 assert(i < NUMKEYS(mp));
1384 node = NODEPTR(mp, i);
1387 CURSOR_TOP(cursor)->mp_ki = i;
1389 mpp->mp_parent = mp;
1390 if ((mp = mdbenv_get_page(txn->mt_env, NODEPGNO(node))) == NULL)
1395 if (cursor && cursor_push_page(cursor, mp) == NULL)
1399 MDB_dhead *dh = ((MDB_dhead *)mp)-1;
1400 if ((rc = mdb_touch(txn, mpp)) != 0)
1402 dh = ((MDB_dhead *)mpp->mp_page)-1;
1403 dh->md_parent = mpp->mp_parent;
1404 dh->md_pi = mpp->mp_pi;
1411 DPRINTF("internal error, index points to a %02X page!?",
1416 DPRINTF("found leaf page %lu for key %.*s", mp->mp_pgno,
1417 key ? (int)key->mv_size : 0, key ? (char *)key->mv_data : NULL);
1422 /* Search for the page a given key should be in.
1423 * Stores a pointer to the found page in *mpp.
1424 * If key is NULL, search for the lowest page (used by mdb_cursor_first).
1425 * If cursor is non-null, pushes parent pages on the cursor stack.
1426 * If modify is true, visited pages are updated with new page numbers.
1429 mdb_search_page(MDB_txn *txn, MDB_dbi dbi, MDB_val *key,
1430 MDB_cursor *cursor, int modify, MDB_pageparent *mpp)
1435 /* Choose which root page to start with. If a transaction is given
1436 * use the root page from the transaction, otherwise read the last
1437 * committed root page.
1439 if (F_ISSET(txn->mt_flags, MDB_TXN_ERROR)) {
1440 DPRINTF("transaction has failed, must abort");
1443 root = txn->mt_root;
1445 if (root == P_INVALID) { /* Tree is empty. */
1446 DPRINTF("tree is empty");
1450 if ((mpp->mp_page = mdbenv_get_page(txn->mt_env, root)) == NULL)
1453 DPRINTF("root page has flags 0x%X", mpp->mp_page->mp_flags);
1455 if (modify && !F_ISSET(mpp->mp_page->mp_flags, P_DIRTY)) {
1456 mpp->mp_parent = NULL;
1458 if ((rc = mdb_touch(txn, mpp)))
1460 txn->mt_root = mpp->mp_page->mp_pgno;
1463 return mdb_search_page_root(txn, dbi, key, cursor, modify, mpp);
1467 mdb_read_data(MDB_env *env, MDB_node *leaf, MDB_val *data)
1469 MDB_page *omp; /* overflow mpage */
1472 if (!F_ISSET(leaf->mn_flags, F_BIGDATA)) {
1473 data->mv_size = leaf->mn_dsize;
1474 data->mv_data = NODEDATA(leaf);
1478 /* Read overflow data.
1480 data->mv_size = leaf->mn_dsize;
1481 memcpy(&pgno, NODEDATA(leaf), sizeof(pgno));
1482 if ((omp = mdbenv_get_page(env, pgno)) == NULL) {
1483 DPRINTF("read overflow page %lu failed", pgno);
1486 data->mv_data = omp;
1492 mdb_get(MDB_txn *txn, MDB_dbi dbi,
1493 MDB_val *key, MDB_val *data)
1501 DPRINTF("===> get key [%.*s]", (int)key->mv_size, (char *)key->mv_data);
1503 if (key->mv_size == 0 || key->mv_size > MAXKEYSIZE) {
1507 if ((rc = mdb_search_page(txn, dbi, key, NULL, 0, &mpp)) != MDB_SUCCESS)
1510 leaf = mdb_search_node(txn, dbi, mpp.mp_page, key, &exact, NULL);
1512 rc = mdb_read_data(txn->mt_env, leaf, data);
1521 mdb_sibling(MDB_cursor *cursor, int move_right)
1525 MDB_ppage *parent, *top;
1528 top = CURSOR_TOP(cursor);
1529 if ((parent = SLIST_NEXT(top, mp_entry)) == NULL) {
1530 return ENOENT; /* root has no siblings */
1533 DPRINTF("parent page is page %lu, index %u",
1534 parent->mp_page->mp_pgno, parent->mp_ki);
1536 cursor_pop_page(cursor);
1537 if (move_right ? (parent->mp_ki + 1 >= NUMKEYS(parent->mp_page))
1538 : (parent->mp_ki == 0)) {
1539 DPRINTF("no more keys left, moving to %s sibling",
1540 move_right ? "right" : "left");
1541 if ((rc = mdb_sibling(cursor, move_right)) != MDB_SUCCESS)
1543 parent = CURSOR_TOP(cursor);
1549 DPRINTF("just moving to %s index key %u",
1550 move_right ? "right" : "left", parent->mp_ki);
1552 assert(IS_BRANCH(parent->mp_page));
1554 indx = NODEPTR(parent->mp_page, parent->mp_ki);
1555 if ((mp = mdbenv_get_page(cursor->mc_txn->mt_env, indx->mn_pgno)) == NULL)
1558 mp->parent = parent->mp_page;
1559 mp->parent_index = parent->mp_ki;
1562 cursor_push_page(cursor, mp);
1568 mdb_set_key(MDB_node *node, MDB_val *key)
1573 key->mv_size = node->mn_ksize;
1574 key->mv_data = NODEKEY(node);
/* Advance an initialized cursor to the next key in order.
 * If the current leaf is exhausted, moves to the right sibling leaf via
 * mdb_sibling(); on success returns the new key (and data, if requested)
 * through mdb_set_key()/mdb_read_data(). */
1580 mdb_cursor_next(MDB_cursor *cursor, MDB_val *key, MDB_val *data)
/* Already walked past the last entry on a previous call. */
1586 if (cursor->mc_eof) {
1590 assert(cursor->mc_initialized);
1592 top = CURSOR_TOP(cursor);
1595 DPRINTF("cursor_next: top page is %lu in cursor %p", mp->mp_pgno, (void *) cursor);
/* No more keys on this leaf: hop to the right sibling page. */
1597 if (top->mp_ki + 1 >= NUMKEYS(mp)) {
1598 DPRINTF("=====> move to next sibling page");
1599 if (mdb_sibling(cursor, 1) != MDB_SUCCESS) {
1603 top = CURSOR_TOP(cursor);
1605 DPRINTF("next page is %lu, key index %u", mp->mp_pgno, top->mp_ki);
1609 DPRINTF("==> cursor points to page %lu with %u keys, key index %u",
1610 mp->mp_pgno, NUMKEYS(mp), top->mp_ki);
1612 assert(IS_LEAF(mp));
1613 leaf = NODEPTR(mp, top->mp_ki);
/* data is optional; only resolve (possibly overflow) data when asked. */
1615 if (data && mdb_read_data(cursor->mc_txn->mt_env, leaf, data) != MDB_SUCCESS)
1618 return mdb_set_key(leaf, key);
/* Position the cursor at the given key (or, when exactp is NULL, at the
 * first key >= the given key). On an inexact miss past the end of a leaf,
 * falls through to the first node of the right sibling. Returns the key
 * found via mdb_set_key() and optionally its data. */
1622 mdb_cursor_set(MDB_cursor *cursor, MDB_val *key, MDB_val *data,
1632 assert(key->mv_size > 0);
1634 rc = mdb_search_page(cursor->mc_txn, cursor->mc_dbi, key, cursor, 0, &mpp);
1635 if (rc != MDB_SUCCESS)
1637 assert(IS_LEAF(mpp.mp_page));
1639 top = CURSOR_TOP(cursor);
1640 leaf = mdb_search_node(cursor->mc_txn, cursor->mc_dbi, mpp.mp_page, key, exactp, &top->mp_ki);
/* Caller required an exact match (exactp != NULL) but none was found. */
1641 if (exactp != NULL && !*exactp) {
1642 /* MDB_CURSOR_EXACT specified and not an exact match. */
/* Inexact search fell off the end of this leaf: the next candidate is
 * the first node of the right sibling leaf. */
1647 DPRINTF("===> inexact leaf not found, goto sibling");
1648 if ((rc = mdb_sibling(cursor, 1)) != MDB_SUCCESS)
1649 return rc; /* no entries matched */
1650 top = CURSOR_TOP(cursor);
1652 mpp.mp_page = top->mp_page;
1653 assert(IS_LEAF(mpp.mp_page));
1654 leaf = NODEPTR(mpp.mp_page, 0);
1657 cursor->mc_initialized = 1;
1660 if (data && (rc = mdb_read_data(cursor->mc_txn->mt_env, leaf, data)) != MDB_SUCCESS)
1663 rc = mdb_set_key(leaf, key);
1664 if (rc == MDB_SUCCESS) {
1665 DPRINTF("==> cursor placed on key %.*s",
1666 (int)key->mv_size, (char *)key->mv_data);
/* Position the cursor on the first key in the database: search with a
 * NULL key descends to the leftmost leaf, then take its node 0. */
1674 mdb_cursor_first(MDB_cursor *cursor, MDB_val *key, MDB_val *data)
1680 rc = mdb_search_page(cursor->mc_txn, cursor->mc_dbi, NULL, cursor, 0, &mpp);
1681 if (rc != MDB_SUCCESS)
1683 assert(IS_LEAF(mpp.mp_page));
1685 leaf = NODEPTR(mpp.mp_page, 0);
1686 cursor->mc_initialized = 1;
1689 if (data && (rc = mdb_read_data(cursor->mc_txn->mt_env, leaf, data)) != MDB_SUCCESS)
1692 return mdb_set_key(leaf, key);
/* Public cursor dispatch: perform the requested cursor operation <op>.
 * Visible cases: SET/EXACT lookups (stack reset, key validated, then
 * mdb_cursor_set with or without the exact flag), NEXT (first on an
 * uninitialized cursor), and FIRST (stack reset then first). */
1696 mdb_cursor_get(MDB_cursor *cursor, MDB_val *key, MDB_val *data,
1706 case MDB_CURSOR_EXACT:
/* Discard any previous position before an absolute lookup. */
1707 while (CURSOR_TOP(cursor) != NULL)
1708 cursor_pop_page(cursor);
/* Reject missing/empty/oversized keys up front. */
1709 if (key == NULL || key->mv_size == 0 || key->mv_size > MAXKEYSIZE) {
1711 } else if (op == MDB_CURSOR_EXACT)
1712 rc = mdb_cursor_set(cursor, key, data, &exact);
1714 rc = mdb_cursor_set(cursor, key, data, NULL);
/* NEXT on a fresh cursor behaves like FIRST. */
1717 if (!cursor->mc_initialized)
1718 rc = mdb_cursor_first(cursor, key, data);
1720 rc = mdb_cursor_next(cursor, key, data);
1723 while (CURSOR_TOP(cursor) != NULL)
1724 cursor_pop_page(cursor);
1725 rc = mdb_cursor_first(cursor, key, data);
1728 DPRINTF("unhandled/unimplemented cursor operation %u", op);
1736 /* Allocate a page and initialize it
/* Allocates <num> contiguous pages in the transaction, stamps the header
 * (flags | P_DIRTY, empty lower/upper bounds), and bumps the per-DB page
 * counters according to the page type. Overflow allocations record the
 * run length in mp_pages. Returns the dirty-page wrapper (NULL on the
 * visible allocation-failure path). */
1739 mdb_new_page(MDB_txn *txn, MDB_dbi dbi, uint32_t flags, int num)
1743 if ((dp = mdb_alloc_page(txn, NULL, 0, num)) == NULL)
1745 DPRINTF("allocated new mpage %lu, page size %u",
1746 dp->p.mp_pgno, txn->mt_env->me_meta.mm_psize);
1747 dp->p.mp_flags = flags | P_DIRTY;
/* Fresh page: free space spans from just after the header to page end. */
1748 dp->p.mp_lower = PAGEHDRSZ;
1749 dp->p.mp_upper = txn->mt_env->me_meta.mm_psize;
1751 if (IS_BRANCH(&dp->p))
1752 txn->mt_dbs[dbi]->md_branch_pages++;
1753 else if (IS_LEAF(&dp->p))
1754 txn->mt_dbs[dbi]->md_leaf_pages++;
1755 else if (IS_OVERFLOW(&dp->p)) {
1756 txn->mt_dbs[dbi]->md_overflow_pages += num;
1757 dp->p.mp_pages = num;
/* Compute the on-page space a leaf node for <key,data> will consume,
 * including its mp_ptrs slot. Data large enough to be spilled to an
 * overflow page (>= psize / MDB_MINKEYS) is counted as a pgno_t
 * reference instead of inline bytes. */
1764 mdb_leaf_size(MDB_env *env, MDB_val *key, MDB_val *data)
1768 sz = LEAFSIZE(key, data);
1769 if (data->mv_size >= env->me_meta.mm_psize / MDB_MINKEYS) {
1770 /* put on overflow page */
/* Replace the inline data bytes with the size of an overflow-page ref. */
1771 sz -= data->mv_size - sizeof(pgno_t);
/* +sizeof(indx_t): account for the node's pointer slot in mp_ptrs[]. */
1774 return sz + sizeof(indx_t);
/* Compute the on-page space a branch node for <key> will consume,
 * including its mp_ptrs slot. Overflowing branch keys are recognized
 * but not yet supported (see commented-out adjustment). */
1778 mdb_branch_size(MDB_env *env, MDB_val *key)
1783 if (sz >= env->me_meta.mm_psize / MDB_MINKEYS) {
1784 /* put on overflow page */
1785 /* not implemented */
1786 /* sz -= key->size - sizeof(pgno_t); */
1789 return sz + sizeof(indx_t);
/* Insert a node at slot <indx> of page <mp>.
 * For leaf pages the node carries <key,data> (large data is redirected
 * to a freshly allocated overflow page, or referenced via F_BIGDATA if
 * it already lives on one); for branch pages it carries <key,pgno>.
 * Shifts higher mp_ptrs slots up, carves node space from the top of the
 * free area (mp_upper), and writes key/data bytes after the node header.
 * Fails (visible path) when node + pointer slot exceed SIZELEFT(mp) —
 * callers split the page first. */
1793 mdb_add_node(MDB_txn *txn, MDB_dbi dbi, MDB_page *mp, indx_t indx,
1794 MDB_val *key, MDB_val *data, pgno_t pgno, uint8_t flags)
1797 size_t node_size = NODESIZE;
1800 MDB_dpage *ofp = NULL; /* overflow page */
1802 assert(mp->mp_upper >= mp->mp_lower);
1804 DPRINTF("add node [%.*s] to %s page %lu at index %i, key size %zu",
1805 key ? (int)key->mv_size : 0, key ? (char *)key->mv_data : NULL,
1806 IS_LEAF(mp) ? "leaf" : "branch",
1807 mp->mp_pgno, indx, key ? key->mv_size : 0);
/* key may be NULL: the leftmost branch node has an implicit key. */
1810 node_size += key->mv_size;
1814 if (F_ISSET(flags, F_BIGDATA)) {
1815 /* Data already on overflow page. */
1816 node_size += sizeof(pgno_t);
1817 } else if (data->mv_size >= txn->mt_env->me_meta.mm_psize / MDB_MINKEYS) {
1818 int ovpages = OVPAGES(data->mv_size, txn->mt_env->me_meta.mm_psize);
1819 /* Put data on overflow page. */
1820 DPRINTF("data size is %zu, put on overflow page",
1822 node_size += sizeof(pgno_t);
1823 if ((ofp = mdb_new_page(txn, dbi, P_OVERFLOW, ovpages)) == NULL)
1825 DPRINTF("allocated overflow page %lu", ofp->p.mp_pgno);
1828 node_size += data->mv_size;
/* Not enough free space for node + its mp_ptrs slot: caller must split. */
1832 if (node_size + sizeof(indx_t) > SIZELEFT(mp)) {
1833 DPRINTF("not enough room in page %lu, got %u ptrs",
1834 mp->mp_pgno, NUMKEYS(mp));
1835 DPRINTF("upper - lower = %u - %u = %u", mp->mp_upper, mp->mp_lower,
1836 mp->mp_upper - mp->mp_lower);
1837 DPRINTF("node size = %zu", node_size);
1841 /* Move higher pointers up one slot. */
1842 for (i = NUMKEYS(mp); i > indx; i--)
1843 mp->mp_ptrs[i] = mp->mp_ptrs[i - 1];
1845 /* Adjust free space offsets. */
1846 ofs = mp->mp_upper - node_size;
1847 assert(ofs >= mp->mp_lower + sizeof(indx_t));
1848 mp->mp_ptrs[indx] = ofs;
1850 mp->mp_lower += sizeof(indx_t);
1852 /* Write the node data. */
1853 node = NODEPTR(mp, indx);
1854 node->mn_ksize = (key == NULL) ? 0 : key->mv_size;
1855 node->mn_flags = flags;
/* Leaf nodes store the data size; branch nodes store the child pgno
 * (mn_dsize and mn_pgno presumably share a union — TODO confirm). */
1857 node->mn_dsize = data->mv_size;
1859 node->mn_pgno = pgno;
1862 memcpy(NODEKEY(node), key->mv_data, key->mv_size);
/* Data bytes: inline copy, or an overflow pgno followed by the payload
 * copied onto the overflow page itself. */
1867 if (F_ISSET(flags, F_BIGDATA))
1868 memcpy(node->mn_data + key->mv_size, data->mv_data,
1871 memcpy(node->mn_data + key->mv_size, data->mv_data,
1874 memcpy(node->mn_data + key->mv_size, &ofp->p.mp_pgno,
1876 memcpy(METADATA(&ofp->p), data->mv_data, data->mv_size);
/* Remove the node at slot <indx> from page <mp>.
 * Computes the node's total size (header + key, plus overflow-ref or
 * inline data for leaves), compacts the mp_ptrs array, relocates any
 * node stored below the deleted one upward by <sz> via memmove, and
 * shrinks mp_lower by one pointer slot. Does NOT free overflow pages —
 * callers (e.g. mdb_del) handle that. */
1884 mdb_del_node(MDB_page *mp, indx_t indx)
1887 indx_t i, j, numkeys, ptr;
1891 DPRINTF("delete node %u on %s page %lu", indx,
1892 IS_LEAF(mp) ? "leaf" : "branch", mp->mp_pgno);
1893 assert(indx < NUMKEYS(mp));
1895 node = NODEPTR(mp, indx);
1896 sz = NODESIZE + node->mn_ksize;
/* Leaf nodes also occupy data space: 8-byte overflow ref or inline bytes. */
1898 if (F_ISSET(node->mn_flags, F_BIGDATA))
1899 sz += sizeof(pgno_t);
1901 sz += NODEDSZ(node);
/* Compact mp_ptrs; offsets below the deleted node's offset shift up by sz.
 * NOTE(review): the line skipping i == indx is not visible in this excerpt. */
1904 ptr = mp->mp_ptrs[indx];
1905 numkeys = NUMKEYS(mp);
1906 for (i = j = 0; i < numkeys; i++) {
1908 mp->mp_ptrs[j] = mp->mp_ptrs[i];
1909 if (mp->mp_ptrs[i] < ptr)
1910 mp->mp_ptrs[j] += sz;
/* Close the gap in node storage (regions overlap: use memmove). */
1915 base = (char *)mp + mp->mp_upper;
1916 memmove(base + sz, base, ptr - mp->mp_upper);
1918 mp->mp_lower -= sizeof(indx_t);
/* Allocate and initialize a cursor bound to <txn>/<dbi>, returned via
 * *ret. The page stack starts empty; the cursor becomes usable after
 * the first positioning operation. Rejects NULL txn/ret. */
1923 mdb_cursor_open(MDB_txn *txn, MDB_dbi dbi, MDB_cursor **ret)
1927 if (txn == NULL || ret == NULL)
1930 if ((cursor = calloc(1, sizeof(*cursor))) != NULL) {
1931 SLIST_INIT(&cursor->mc_stack);
1932 cursor->mc_dbi = dbi;
1933 cursor->mc_txn = txn;
/* Release a cursor: pop every remaining page off its stack, then
 * (per the visible code) the cursor itself is torn down. Safe to call
 * with NULL. */
1942 mdb_cursor_close(MDB_cursor *cursor)
1944 if (cursor != NULL) {
1945 while (!CURSOR_EMPTY(cursor))
1946 cursor_pop_page(cursor);
1948 /* btree_close(cursor->bt); */
/* Replace the key of the node at slot <indx> on page <mp> in place.
 * delta = new size - old size; when the key grows, space must exist
 * (SIZELEFT check). All nodes at or below the target offset are shifted
 * by delta (memmove of the region from mp_upper up to the node header),
 * affected mp_ptrs entries are adjusted, then the new key bytes are
 * written. */
1954 mdb_update_key(MDB_page *mp, indx_t indx, MDB_val *key)
1956 indx_t ptr, i, numkeys;
1962 node = NODEPTR(mp, indx);
1963 ptr = mp->mp_ptrs[indx];
1964 DPRINTF("update key %u (ofs %u) [%.*s] to [%.*s] on page %lu",
1966 (int)node->mn_ksize, (char *)NODEKEY(node),
1967 (int)key->mv_size, (char *)key->mv_data,
1970 delta = key->mv_size - node->mn_ksize;
/* Growing key needs delta bytes of free space on the page. */
1972 if (delta > 0 && SIZELEFT(mp) < delta) {
1973 DPRINTF("OUCH! Not enough room, delta = %d", delta);
/* Every node stored at or below this one's offset moves by delta. */
1977 numkeys = NUMKEYS(mp);
1978 for (i = 0; i < numkeys; i++) {
1979 if (mp->mp_ptrs[i] <= ptr)
1980 mp->mp_ptrs[i] -= delta;
/* Shift node storage from mp_upper through this node's header. */
1983 base = (char *)mp + mp->mp_upper;
1984 len = ptr - mp->mp_upper + NODESIZE;
1985 memmove(base - delta, base, len);
1986 mp->mp_upper -= delta;
1988 node = NODEPTR(mp, indx);
1989 node->mn_ksize = key->mv_size;
1992 memcpy(NODEKEY(node), key->mv_data, key->mv_size);
1997 /* Move a node from src to dst.
/* Used by rebalancing: copies node <srcindx> of src into slot <dstindx>
 * of dst (after dirtying both pages via mdb_touch), deletes it from src,
 * then repairs the parent separator keys — whenever index 0 of either
 * page changed, the parent's separator is rewritten, and a branch page's
 * own node 0 gets its key nulled out (leftmost branch key is implicit). */
2000 mdb_move_node(MDB_txn *txn, MDB_dbi dbi, MDB_pageparent *src, indx_t srcindx,
2001 MDB_pageparent *dst, indx_t dstindx)
2007 srcnode = NODEPTR(src->mp_page, srcindx);
2008 DPRINTF("moving %s node %u [%.*s] on page %lu to node %u on page %lu",
2009 IS_LEAF(src->mp_page) ? "leaf" : "branch",
2011 (int)srcnode->mn_ksize, (char *)NODEKEY(srcnode),
2012 src->mp_page->mp_pgno,
2013 dstindx, dst->mp_page->mp_pgno);
2015 /* Mark src and dst as dirty. */
2016 if ((rc = mdb_touch(txn, src)) ||
2017 (rc = mdb_touch(txn, dst)))
2020 /* Add the node to the destination page.
2022 key.mv_size = srcnode->mn_ksize;
2023 key.mv_data = NODEKEY(srcnode);
2024 data.mv_size = NODEDSZ(srcnode);
2025 data.mv_data = NODEDATA(srcnode);
2026 rc = mdb_add_node(txn, dbi, dst->mp_page, dstindx, &key, &data, NODEPGNO(srcnode),
2028 if (rc != MDB_SUCCESS)
2031 /* Delete the node from the source page.
2033 mdb_del_node(src->mp_page, srcindx);
2035 /* Update the parent separators.
/* src lost its first key (and isn't the leftmost child): the parent's
 * separator for src must become src's new first key. */
2037 if (srcindx == 0 && src->mp_pi != 0) {
2038 DPRINTF("update separator for source page %lu to [%.*s]",
2039 src->mp_page->mp_pgno, (int)key.mv_size, (char *)key.mv_data);
2040 if ((rc = mdb_update_key(src->mp_parent, src->mp_pi,
2041 &key)) != MDB_SUCCESS)
/* Branch pages keep an empty key in slot 0 (implicit leftmost pointer).
 * NOTE(review): side effect inside assert() — compiled out if NDEBUG. */
2045 if (srcindx == 0 && IS_BRANCH(src->mp_page)) {
2047 nullkey.mv_size = 0;
2048 assert(mdb_update_key(src->mp_page, 0, &nullkey) == MDB_SUCCESS);
2051 if (dstindx == 0 && dst->mp_pi != 0) {
2052 DPRINTF("update separator for destination page %lu to [%.*s]",
2053 dst->mp_page->mp_pgno, (int)key.mv_size, (char *)key.mv_data);
2054 if ((rc = mdb_update_key(dst->mp_parent, dst->mp_pi,
2055 &key)) != MDB_SUCCESS)
2059 if (dstindx == 0 && IS_BRANCH(dst->mp_page)) {
2061 nullkey.mv_size = 0;
2062 assert(mdb_update_key(dst->mp_page, 0, &nullkey) == MDB_SUCCESS);
/* Merge all nodes of <src> into <dst> (appending at dst's end), then
 * unlink src from its parent, fix the parent separator if src was the
 * leftmost child, decrement the per-DB page counter, and recursively
 * rebalance the parent. Neither page may be the root. */
2069 mdb_merge(MDB_txn *txn, MDB_dbi dbi, MDB_pageparent *src, MDB_pageparent *dst)
2078 DPRINTF("merging page %lu and %lu", src->mp_page->mp_pgno, dst->mp_page->mp_pgno);
2080 assert(txn != NULL);
2081 assert(src->mp_parent); /* can't merge root page */
2082 assert(dst->mp_parent);
2084 /* Mark src and dst as dirty. */
2085 if ((rc = mdb_touch(txn, src)) ||
2086 (rc = mdb_touch(txn, dst)))
2089 /* Move all nodes from src to dst.
2091 for (i = 0; i < NUMKEYS(src->mp_page); i++) {
2092 srcnode = NODEPTR(src->mp_page, i);
2094 key.mv_size = srcnode->mn_ksize;
2095 key.mv_data = NODEKEY(srcnode);
2096 data.mv_size = NODEDSZ(srcnode);
2097 data.mv_data = NODEDATA(srcnode);
/* Append at NUMKEYS(dst): merge assumes dst precedes src in key order. */
2098 rc = mdb_add_node(txn, dbi, dst->mp_page, NUMKEYS(dst->mp_page), &key,
2099 &data, NODEPGNO(srcnode), srcnode->mn_flags);
2100 if (rc != MDB_SUCCESS)
2104 DPRINTF("dst page %lu now has %u keys (%.1f%% filled)",
2105 dst->mp_page->mp_pgno, NUMKEYS(dst->mp_page), (float)PAGEFILL(txn->mt_env, dst->mp_page) / 10);
2107 /* Unlink the src page from parent.
2109 mdb_del_node(src->mp_parent, src->mp_pi);
2110 if (src->mp_pi == 0) {
2112 if ((rc = mdb_update_key(src->mp_parent, 0, &key)) != MDB_SUCCESS)
2116 if (IS_LEAF(src->mp_page))
2117 txn->mt_dbs[dbi]->md_leaf_pages--;
2119 txn->mt_dbs[dbi]->md_branch_pages--;
/* The parent lost a child: it may itself now be underfull. */
2121 mpp.mp_page = src->mp_parent;
2122 dh = (MDB_dhead *)src->mp_parent;
2124 mpp.mp_parent = dh->md_parent;
2125 mpp.mp_pi = dh->md_pi;
2127 return mdb_rebalance(txn, dbi, &mpp);
/* Pages below FILL_THRESHOLD (25.0% of PAGEFILL's 0..1000 scale) get
 * rebalanced after deletions. */
2130 #define FILL_THRESHOLD 250
/* Restore the B-tree invariant for an underfull page:
 *  - above threshold: nothing to do;
 *  - root special cases: delete an empty tree, or collapse a root branch
 *    with a single child;
 *  - otherwise pick a neighbor (right if we are leftmost, else left):
 *    borrow one node from it if it can spare one, else merge the two
 *    pages (always merging right page into left). */
2133 mdb_rebalance(MDB_txn *txn, MDB_dbi dbi, MDB_pageparent *mpp)
2138 indx_t si = 0, di = 0;
2140 assert(txn != NULL);
2141 assert(mpp != NULL);
2143 DPRINTF("rebalancing %s page %lu (has %u keys, %.1f%% full)",
2144 IS_LEAF(mpp->mp_page) ? "leaf" : "branch",
2145 mpp->mp_page->mp_pgno, NUMKEYS(mpp->mp_page), (float)PAGEFILL(txn->mt_env, mpp->mp_page) / 10);
2147 if (PAGEFILL(txn->mt_env, mpp->mp_page) >= FILL_THRESHOLD) {
2148 DPRINTF("no need to rebalance page %lu, above fill threshold",
2149 mpp->mp_page->mp_pgno);
/* Root page handling: no parent to borrow from or merge with. */
2153 if (mpp->mp_parent == NULL) {
2154 if (NUMKEYS(mpp->mp_page) == 0) {
2155 DPRINTF("tree is completely empty");
2156 txn->mt_dbs[dbi]->md_root = P_INVALID;
2157 txn->mt_dbs[dbi]->md_depth--;
2158 txn->mt_dbs[dbi]->md_leaf_pages--;
2159 } else if (IS_BRANCH(mpp->mp_page) && NUMKEYS(mpp->mp_page) == 1) {
2160 DPRINTF("collapsing root page!");
/* Single-child root branch: promote the child to root. */
2161 txn->mt_dbs[dbi]->md_root = NODEPGNO(NODEPTR(mpp->mp_page, 0));
2162 if ((root = mdbenv_get_page(txn->mt_env, txn->mt_dbs[dbi]->md_root)) == NULL)
2164 txn->mt_dbs[dbi]->md_depth--;
2165 txn->mt_dbs[dbi]->md_branch_pages--;
2167 DPRINTF("root page doesn't need rebalancing");
2171 /* The parent (branch page) must have at least 2 pointers,
2172 * otherwise the tree is invalid.
2174 assert(NUMKEYS(mpp->mp_parent) > 1);
2176 /* Leaf page fill factor is below the threshold.
2177 * Try to move keys from left or right neighbor, or
2178 * merge with a neighbor page.
/* si/di = source/destination node indices used if we borrow a node:
 * from the right neighbor take its first node (si stays 0) and append
 * here (di = NUMKEYS); from the left neighbor take its last node. */
2183 if (mpp->mp_pi == 0) {
2184 /* We're the leftmost leaf in our parent.
2186 DPRINTF("reading right neighbor");
2187 node = NODEPTR(mpp->mp_parent, mpp->mp_pi + 1);
2188 if ((npp.mp_page = mdbenv_get_page(txn->mt_env, NODEPGNO(node))) == NULL)
2190 npp.mp_pi = mpp->mp_pi + 1;
2192 di = NUMKEYS(mpp->mp_page);
2194 /* There is at least one neighbor to the left.
2196 DPRINTF("reading left neighbor");
2197 node = NODEPTR(mpp->mp_parent, mpp->mp_pi - 1);
2198 if ((npp.mp_page = mdbenv_get_page(txn->mt_env, NODEPGNO(node))) == NULL)
2200 npp.mp_pi = mpp->mp_pi - 1;
2201 si = NUMKEYS(npp.mp_page) - 1;
2204 npp.mp_parent = mpp->mp_parent;
2206 DPRINTF("found neighbor page %lu (%u keys, %.1f%% full)",
2207 npp.mp_page->mp_pgno, NUMKEYS(npp.mp_page), (float)PAGEFILL(txn->mt_env, npp.mp_page) / 10);
2209 /* If the neighbor page is above threshold and has at least two
2210 * keys, move one key from it.
2212 * Otherwise we should try to merge them.
2214 if (PAGEFILL(txn->mt_env, npp.mp_page) >= FILL_THRESHOLD && NUMKEYS(npp.mp_page) >= 2)
2215 return mdb_move_node(txn, dbi, &npp, si, mpp, di);
2216 else { /* FIXME: if (has_enough_room()) */
/* Merge direction: always fold the right-hand page into the left. */
2217 if (mpp->mp_pi == 0)
2218 return mdb_merge(txn, dbi, &npp, mpp);
2220 return mdb_merge(txn, dbi, mpp, &npp);
/* Delete <key> from database <dbi> within a write transaction.
 * Validates txn/key, searches for an exact leaf match (modify=1 so the
 * path is made writable), optionally returns the old data, deletes the
 * node, returns any overflow pages to the free list, decrements the
 * entry count, and rebalances. On rebalance failure the txn is flagged
 * MDB_TXN_ERROR. */
2225 mdb_del(MDB_txn *txn, MDB_dbi dbi,
2226 MDB_val *key, MDB_val *data)
2233 DPRINTF("========> delete key %.*s", (int)key->mv_size, (char *)key->mv_data);
2235 assert(key != NULL);
2237 if (txn == NULL || dbi >= txn->mt_numdbs)
2240 if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) {
2244 if (key->mv_size == 0 || key->mv_size > MAXKEYSIZE) {
2248 if ((rc = mdb_search_page(txn, dbi, key, NULL, 1, &mpp)) != MDB_SUCCESS)
2251 leaf = mdb_search_node(txn, dbi, mpp.mp_page, key, &exact, &ki);
2252 if (leaf == NULL || !exact) {
/* Hand back the deleted value if the caller wants it. */
2256 if (data && (rc = mdb_read_data(txn->mt_env, leaf, data)) != MDB_SUCCESS)
2259 mdb_del_node(mpp.mp_page, ki);
2260 /* add overflow pages to free list */
2261 if (F_ISSET(leaf->mn_flags, F_BIGDATA)) {
/* Overflow pgno is stored unaligned in the node data: memcpy it out.
 * NOTE(review): the pg increment inside the loop is not visible in
 * this excerpt. */
2265 memcpy(&pg, NODEDATA(leaf), sizeof(pg));
2266 ovpages = OVPAGES(NODEDSZ(leaf), txn->mt_env->me_meta.mm_psize);
2267 for (i=0; i<ovpages; i++) {
2268 mdb_idl_insert(txn->mt_free_pgs, pg);
2272 txn->mt_dbs[dbi]->md_entries--;
2273 rc = mdb_rebalance(txn, dbi, &mpp);
2274 if (rc != MDB_SUCCESS)
2275 txn->mt_flags |= MDB_TXN_ERROR;
2280 /* Split page <*mpp>, and insert <key,(data|newpgno)> in either left or
2281 * right sibling, at index <*newindxp> (as if unsplit). Updates *mpp and
2282 * *newindxp with the actual values after split, ie if *mpp and *newindxp
2283 * refer to a node in the new right sibling page.
/* Outline of the visible logic:
 *  1. If this page is the root, allocate a new branch root above it
 *     (tree grows by one level; left child pointer is implicit/keyless).
 *  2. Allocate a right sibling page with the same flags.
 *  3. Snapshot the full page into a malloc'd copy, wipe the original,
 *     and pick split_indx = NUMKEYS/2 + 1.
 *  4. Determine the separator key (the new key itself if it lands
 *     exactly on the split point) and insert it into the parent —
 *     recursively splitting the parent if it lacks room.
 *  5. Redistribute nodes from the copy into left/right pages, inserting
 *     the new entry at its (possibly shifted) index along the way; a
 *     branch page's node 0 gets no key data. */
2286 mdb_split(MDB_txn *txn, MDB_dbi dbi, MDB_page **mpp, unsigned int *newindxp,
2287 MDB_val *newkey, MDB_val *newdata, pgno_t newpgno)
2290 int rc = MDB_SUCCESS, ins_new = 0;
2293 unsigned int i, j, split_indx;
2295 MDB_val sepkey, rkey, rdata;
2297 MDB_dpage *mdp, *rdp, *pdp;
2300 assert(txn != NULL);
/* Recover the dirty-page wrapper from the raw page pointer: the MDB_dhead
 * header sits immediately before the page in memory. */
2302 dh = ((MDB_dhead *)*mpp) - 1;
2303 mdp = (MDB_dpage *)dh;
2304 newindx = *newindxp;
2306 DPRINTF("-----> splitting %s page %lu and adding [%.*s] at index %i",
2307 IS_LEAF(&mdp->p) ? "leaf" : "branch", mdp->p.mp_pgno,
2308 (int)newkey->mv_size, (char *)newkey->mv_data, *newindxp);
/* Splitting the root: create a new root branch page above it. */
2310 if (mdp->h.md_parent == NULL) {
2311 if ((pdp = mdb_new_page(txn, dbi, P_BRANCH, 1)) == NULL)
2314 mdp->h.md_parent = &pdp->p;
2315 txn->mt_dbs[dbi]->md_root = pdp->p.mp_pgno;
2317 txn->mt_root = pdp->p.mp_pgno;
2318 DPRINTF("root split! new root = %lu", pdp->p.mp_pgno);
2319 txn->mt_dbs[dbi]->md_depth++;
2321 /* Add left (implicit) pointer. */
2322 if (mdb_add_node(txn, dbi, &pdp->p, 0, NULL, NULL,
2323 mdp->p.mp_pgno, 0) != MDB_SUCCESS)
2326 DPRINTF("parent branch page is %lu", mdp->h.md_parent->mp_pgno);
2329 /* Create a right sibling. */
2330 if ((rdp = mdb_new_page(txn, dbi, mdp->p.mp_flags, 1)) == NULL)
2332 rdp->h.md_parent = mdp->h.md_parent;
2333 rdp->h.md_pi = mdp->h.md_pi + 1;
2334 DPRINTF("new right sibling: page %lu", rdp->p.mp_pgno);
2336 /* Move half of the keys to the right sibling. */
2337 if ((copy = malloc(txn->mt_env->me_meta.mm_psize)) == NULL)
2339 memcpy(copy, &mdp->p, txn->mt_env->me_meta.mm_psize);
/* Reset the left page to empty; nodes are re-added from the copy below. */
2340 memset(&mdp->p.mp_ptrs, 0, txn->mt_env->me_meta.mm_psize - PAGEHDRSZ);
2341 mdp->p.mp_lower = PAGEHDRSZ;
2342 mdp->p.mp_upper = txn->mt_env->me_meta.mm_psize;
2344 split_indx = NUMKEYS(copy) / 2 + 1;
2346 /* First find the separating key between the split pages.
2348 memset(&sepkey, 0, sizeof(sepkey));
2349 if (newindx == split_indx) {
2350 sepkey.mv_size = newkey->mv_size;
2351 sepkey.mv_data = newkey->mv_data;
2353 node = NODEPTR(copy, split_indx);
2354 sepkey.mv_size = node->mn_ksize;
2355 sepkey.mv_data = NODEKEY(node);
2358 DPRINTF("separator is [%.*s]", (int)sepkey.mv_size, (char *)sepkey.mv_data);
2360 /* Copy separator key to the parent.
2362 if (SIZELEFT(rdp->h.md_parent) < mdb_branch_size(txn->mt_env, &sepkey)) {
/* Parent is full too: split it recursively to make room. */
2363 rc = mdb_split(txn, dbi, &rdp->h.md_parent, &rdp->h.md_pi,
2364 &sepkey, NULL, rdp->p.mp_pgno);
2366 /* Right page might now have changed parent.
2367 * Check if left page also changed parent.
2369 if (rdp->h.md_parent != mdp->h.md_parent &&
2370 mdp->h.md_pi >= NUMKEYS(mdp->h.md_parent)) {
2371 mdp->h.md_parent = rdp->h.md_parent;
2372 mdp->h.md_pi = rdp->h.md_pi - 1;
2375 rc = mdb_add_node(txn, dbi, rdp->h.md_parent, rdp->h.md_pi,
2376 &sepkey, NULL, rdp->p.mp_pgno, 0);
2378 if (rc != MDB_SUCCESS) {
/* Redistribute: i walks the copy (plus one slot for the new entry),
 * j is the insert index within the page currently being filled. */
2383 for (i = j = 0; i <= NUMKEYS(copy); j++) {
2384 if (i < split_indx) {
2385 /* Re-insert in left sibling. */
2388 /* Insert in right sibling. */
2389 if (i == split_indx)
2390 /* Reset insert index for right sibling. */
2391 j = (i == newindx && ins_new);
2395 if (i == newindx && !ins_new) {
2396 /* Insert the original entry that caused the split. */
2397 rkey.mv_data = newkey->mv_data;
2398 rkey.mv_size = newkey->mv_size;
2399 if (IS_LEAF(&mdp->p)) {
2400 rdata.mv_data = newdata->mv_data;
2401 rdata.mv_size = newdata->mv_size;
2408 /* Update page and index for the new key. */
2411 } else if (i == NUMKEYS(copy)) {
2414 node = NODEPTR(copy, i);
2415 rkey.mv_data = NODEKEY(node);
2416 rkey.mv_size = node->mn_ksize;
2417 if (IS_LEAF(&mdp->p)) {
2418 rdata.mv_data = NODEDATA(node);
2419 rdata.mv_size = node->mn_dsize;
2421 pgno = node->mn_pgno;
2422 flags = node->mn_flags;
2427 if (!IS_LEAF(&mdp->p) && j == 0) {
2428 /* First branch index doesn't need key data. */
2432 rc = mdb_add_node(txn, dbi, &pdp->p, j, &rkey, &rdata, pgno,flags);
/* Store <key,data> in database <dbi> within the current write txn.
 * Validates the txn (writable, and actually the env's active writer) and
 * the key size; searches for the leaf position. Exact matches honor
 * MDB_NOOVERWRITE, otherwise the old node is deleted first. An empty
 * tree gets a fresh root leaf page. Inserts via mdb_add_node, or via
 * mdb_split when the leaf lacks room; failure flags MDB_TXN_ERROR,
 * success bumps md_entries. */
2440 mdb_put(MDB_txn *txn, MDB_dbi dbi,
2441 MDB_val *key, MDB_val *data, unsigned int flags)
2443 int rc = MDB_SUCCESS, exact;
2448 assert(key != NULL);
2449 assert(data != NULL);
2454 if (F_ISSET(txn->mt_flags, MDB_TXN_RDONLY)) {
/* Only the environment's single active write txn may modify pages. */
2458 if (txn->mt_env->me_txn != txn) {
2462 if (key->mv_size == 0 || key->mv_size > MAXKEYSIZE) {
2466 DPRINTF("==> put key %.*s, size %zu, data size %zu",
2467 (int)key->mv_size, (char *)key->mv_data, key->mv_size, data->mv_size);
/* modify=1: make the search path copy-on-write before editing. */
2469 rc = mdb_search_page(txn, dbi, key, NULL, 1, &mpp);
2470 if (rc == MDB_SUCCESS) {
2471 leaf = mdb_search_node(txn, dbi, mpp.mp_page, key, &exact, &ki);
2472 if (leaf && exact) {
2473 if (F_ISSET(flags, MDB_NOOVERWRITE)) {
2474 DPRINTF("duplicate key %.*s",
2475 (int)key->mv_size, (char *)key->mv_data);
/* Overwrite = delete old node, then insert the new one below. */
2478 mdb_del_node(mpp.mp_page, ki);
2480 if (leaf == NULL) { /* append if not found */
2481 ki = NUMKEYS(mpp.mp_page);
2482 DPRINTF("appending key at index %i", ki);
2484 } else if (rc == ENOENT) {
2486 /* new file, just write a root leaf page */
2487 DPRINTF("allocating new root leaf page");
2488 if ((dp = mdb_new_page(txn, dbi, P_LEAF, 1)) == NULL) {
2491 mpp.mp_page = &dp->p;
2492 txn->mt_dbs[dbi]->md_root = mpp.mp_page->mp_pgno;
2494 txn->mt_root = mpp.mp_page->mp_pgno;
2495 txn->mt_dbs[dbi]->md_depth++;
2501 assert(IS_LEAF(mpp.mp_page));
2502 DPRINTF("there are %u keys, should insert new key at index %i",
2503 NUMKEYS(mpp.mp_page), ki);
2505 if (SIZELEFT(mpp.mp_page) < mdb_leaf_size(txn->mt_env, key, data)) {
2506 rc = mdb_split(txn, dbi, &mpp.mp_page, &ki, key, data, P_INVALID);
2508 /* There is room already in this leaf page. */
2509 rc = mdb_add_node(txn, dbi, mpp.mp_page, ki, key, data, 0, 0);
2512 if (rc != MDB_SUCCESS)
2513 txn->mt_flags |= MDB_TXN_ERROR;
2515 txn->mt_dbs[dbi]->md_entries++;
/* Return the environment's flag word through *arg.
 * NOTE(review): the NULL-argument checks/returns are not visible in
 * this excerpt. */
2522 mdbenv_get_flags(MDB_env *env, unsigned int *arg)
2527 *arg = env->me_flags;
/* Return the environment's file path through *arg (borrowed pointer —
 * owned by the env, not the caller). NOTE(review): NULL checks are not
 * visible in this excerpt. */
2532 mdbenv_get_path(MDB_env *env, const char **arg)
2537 *arg = env->me_path;
/* Copy environment-wide statistics (from the cached meta page) into
 * *arg: page size, tree depth, page counts by type, and entry count.
 * Rejects NULL arguments. */
2542 mdbenv_stat(MDB_env *env, MDB_stat *arg)
2544 if (env == NULL || arg == NULL)
2547 arg->ms_psize = env->me_meta.mm_psize;
2548 arg->ms_depth = env->me_meta.mm_depth;
2549 arg->ms_branch_pages = env->me_meta.mm_branch_pages;
2550 arg->ms_leaf_pages = env->me_meta.mm_leaf_pages;
2551 arg->ms_overflow_pages = env->me_meta.mm_overflow_pages;
2552 arg->ms_entries = env->me_meta.mm_entries;
/* Open (or with MDB_CREATE, create) the named sub-database and return
 * its handle in *dbi. Reuses an existing slot if the name is already
 * open; otherwise looks the DB record up by name in the main DB
 * (dbi 0), grows the mt_dbxs/mt_dbs tables in DBX_CHUNK increments when
 * full, and installs the name and DB record in the new slot.
 * NOTE(review): mt_dbs[i] aliases data.mv_data from mdb_get — the record
 * appears to be used in place rather than copied; lifetime tied to the
 * txn's view. */
2557 int mdb_open(MDB_txn *txn, const char *name, unsigned int flags, MDB_dbi *dbi)
2569 /* Is the DB already open? */
2570 for (i=0; i<txn->mt_numdbs; i++) {
2571 if (!strcmp(name, txn->mt_dbxs[i].md_name)) {
2577 /* Find the DB info */
2578 key.mv_size = strlen(name);
2579 key.mv_data = (void *)name;
2580 rc = mdb_get(txn, 0, &key, &data);
2582 /* Create if requested */
2583 if (rc == ENOENT && (flags & MDB_CREATE)) {
/* Insert a zeroed DB record with an empty (P_INVALID) root, then
 * re-fetch it so data points at the stored copy. */
2585 data.mv_size = sizeof(MDB_db);
2586 data.mv_data = &dummy;
2587 memset(&dummy, 0, sizeof(dummy));
2588 dummy.md_root = P_INVALID;
2589 rc = mdb_put(txn, 0, &key, &data, 0);
2590 if (rc == MDB_SUCCESS)
2591 rc = mdb_get(txn, 0, &key, &data);
2594 /* OK, got info, add to table */
2595 if (rc == MDB_SUCCESS) {
2596 /* Is there a free slot? */
/* Tables are sized in DBX_CHUNK multiples; a zero remainder means
 * the current allocation is exactly full. */
2597 if ((txn->mt_numdbs & (DBX_CHUNK-1)) == 0) {
2601 i = txn->mt_numdbs + DBX_CHUNK;
2602 p1 = realloc(txn->mt_dbxs, i * sizeof(MDB_dbx));
2606 p2 = realloc(txn->mt_dbs, i * sizeof(MDB_db *));
2611 txn->mt_dbxs[txn->mt_numdbs].md_name = strdup(name);
2612 txn->mt_dbxs[txn->mt_numdbs].md_cmp = NULL;
2613 txn->mt_dbxs[txn->mt_numdbs].md_rel = NULL;
2614 txn->mt_dbs[txn->mt_numdbs] = data.mv_data;
2615 *dbi = txn->mt_numdbs;
/* Copy per-database statistics for <dbi> into *arg. Page size comes
 * from the environment meta; depth/page/entry counts come from the
 * txn's view of the DB record. Rejects NULL arguments.
 * NOTE(review): dbi range check is not visible in this excerpt. */
2622 int mdb_stat(MDB_txn *txn, MDB_dbi dbi, MDB_stat *arg)
2624 if (txn == NULL || arg == NULL)
2627 arg->ms_psize = txn->mt_env->me_meta.mm_psize;
2628 arg->ms_depth = txn->mt_dbs[dbi]->md_depth;
2629 arg->ms_branch_pages = txn->mt_dbs[dbi]->md_branch_pages;
2630 arg->ms_leaf_pages = txn->mt_dbs[dbi]->md_leaf_pages;
2631 arg->ms_overflow_pages = txn->mt_dbs[dbi]->md_overflow_pages;
2632 arg->ms_entries = txn->mt_dbs[dbi]->md_entries;
2637 void mdb_close(MDB_txn *txn, MDB_dbi dbi)
2639 if (dbi >= txn->mt_numdbs)
2641 free(txn->mt_dbxs[dbi].md_name);
2642 txn->mt_dbxs[dbi].md_name = NULL;