2 Bacula® - The Network Backup Solution
4 Copyright (C) 2009-2011 Free Software Foundation Europe e.V.
6 The main author of Bacula is Kern Sibbald, with contributions from
7 many others, a complete list can be found in the file AUTHORS.
8 This program is Free Software; you can redistribute it and/or
9 modify it under the terms of version three of the GNU Affero General Public
10 License as published by the Free Software Foundation and included
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU Affero General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23 Bacula® is a registered trademark of Kern Sibbald.
24 The licensor of Bacula is the Free Software Foundation Europe
25 (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
26 Switzerland, email:ftf@fsfeurope.org.
30 * This code implements a cache of the currently mounted filesystems, for which
31 * it uses the mostly in-kernel mount information and exports the different OS-
32 * specific interfaces through a generic interface. We use a hashed cache which is
33 * accessed using a hash on the device id, and we keep the previous cache hit as
34 * most of the time we get called repeatedly for the same device; keeping the
35 * previous cache hit gives us a very optimized code path.
37 * This interface is implemented for the following OS-es:
47 * Currently we only use this code for Linux and OSF1 based fstype determination.
48 * For the other OS-es we can use the fstype present in stat structure on those OS-es.
50 * This code replaces the big switch we used before, based on the SUPER_MAGIC present in
51 * the statfs(2) structure, which needed extra code for each new filesystem added to
52 * the OS; for Linux that happens often as it has quite a few different filesystems.
53 * This new implementation should eliminate that, as we use the Linux /proc/mounts in-kernel
54 * data which automatically picks up any new filesystem when it is added to the kernel.
58 * Marco van Wieringen, August 2009
62 #include "mntent_cache.h"
67 #include <sys/types.h>
70 #if defined(HAVE_GETMNTENT)
71 #if defined(HAVE_LINUX_OS) || defined(HAVE_HPUX_OS) || defined(HAVE_AIX_OS)
73 #elif defined(HAVE_SUN_OS)
74 #include <sys/mnttab.h>
75 #endif /* HAVE_GETMNTENT */
76 #elif defined(HAVE_GETMNTINFO)
77 #if defined(HAVE_OPENBSD_OS)
78 #include <sys/param.h>
79 #include <sys/mount.h>
80 #elif defined(HAVE_NETBSD_OS)
81 #include <sys/types.h>
82 #include <sys/statvfs.h>
84 #include <sys/param.h>
85 #include <sys/ucred.h>
86 #include <sys/mount.h>
88 #elif defined(HAVE_AIX_OS)
91 #elif defined(HAVE_OSF1_OS)
92 #include <sys/mount.h>
96 * Protected data by mutex lock.
/* Serializes all access to the cache state below (see P()/V() in the public functions). */
98 static pthread_mutex_t mntent_cache_lock = PTHREAD_MUTEX_INITIALIZER;
/* One-entry memo of the last successful lookup; checked before the hashtable. */
99 static mntent_cache_entry_t *previous_cache_hit = NULL;
/* Hash table keyed on device id (st_dev); NULL until first use (lazy init). */
100 static htable *mntent_cache_entry_hashtable = NULL;
103 * Last time a rescan of the mountlist took place.
105 static time_t last_rescan = 0;
/*
 * NULL-terminated list of filesystem type names to exclude from the cache.
 * NOTE(review): the table contents are elided in this listing (only the
 * Linux #ifdef head is visible) -- skip_fstype() relies on the NULL sentinel.
 */
107 static const char *skipped_fs_types[] = {
108 #if defined(HAVE_LINUX_OS)
115 * Add a new entry to the cache.
116 * This function should be called with a write lock on the mntent_cache.
/*
 * NOTE(review): the parameter lines for 'special', 'fstype' and 'mntopts'
 * are elided in this listing (original lines 119, 121-124) -- the body below
 * clearly reads all four strings plus 'dev'.
 */
118 static inline void add_mntent_mapping(uint32_t dev,
120 const char *mountpoint,
125 mntent_cache_entry_t *mce;
128 * Calculate the length of all strings so we can allocate the buffer
129 * as one big chunk of memory using the hash_malloc method.
131 len = strlen(special) + 1;
132 len += strlen(mountpoint) + 1;
133 len += strlen(fstype) + 1;
/* NOTE(review): line 134 is elided; presumably an 'if (mntopts)' guard,
 * making the mntopts length (and the copy at line 155) conditional -- verify. */
135 len += strlen(mntopts) + 1;
139 * We allocate all members of the hash entry in the same memory chunk.
/* Single allocation: the struct followed immediately by the packed strings. */
141 mce = (mntent_cache_entry_t *)mntent_cache_entry_hashtable->hash_malloc(sizeof(mntent_cache_entry_t) + len);
/* Lay the four strings out back-to-back in the tail of the allocation,
 * each pointer advanced past the previous string's NUL terminator. */
144 mce->special = (char *)mce + sizeof(mntent_cache_entry_t);
145 strcpy(mce->special, special);
147 mce->mountpoint = mce->special + strlen(mce->special) + 1;
148 strcpy(mce->mountpoint, mountpoint);
150 mce->fstype = mce->mountpoint + strlen(mce->mountpoint) + 1;
151 strcpy(mce->fstype, fstype);
154 mce->mntopts = mce->fstype + strlen(mce->fstype) + 1;
155 strcpy(mce->mntopts, mntopts);
/* Key the entry on the device id (mce->dev assignment is elided at ~line 158). */
160 mntent_cache_entry_hashtable->insert(mce->dev, mce);
/*
 * Return true when 'fstype' is in the skipped_fs_types[] exclusion table.
 * NOTE(review): the return statements are elided in this listing; the visible
 * loop walks the NULL-terminated table comparing each entry with bstrcmp().
 */
163 static inline bool skip_fstype(const char *fstype)
167 for (i = 0; skipped_fs_types[i]; i++) {
168 if (bstrcmp(fstype, skipped_fs_types[i]))
176 * OS specific function to load the different mntents into the cache.
177 * This function should be called with a write lock on the mntent_cache.
/*
 * Each #ifdef branch walks the OS's mount table, filters with skip_fstype(),
 * stat()s the mountpoint to obtain st_dev, and feeds add_mntent_mapping().
 * NOTE(review): many lines are elided in this listing (error paths, frees,
 * endmntent()/fclose() cleanup) -- do not assume they are absent in the file.
 */
179 static void refresh_mount_cache(void)
181 #if defined(HAVE_GETMNTENT)
/* getmntent(3)-style iteration (Linux/HP-UX/IRIX/AIX use struct mntent). */
184 #if defined(HAVE_LINUX_OS) || defined(HAVE_HPUX_OS) || defined(HAVE_IRIX_OS) || defined(HAVE_AIX_OS)
187 #if defined(HAVE_LINUX_OS)
/* Prefer the in-kernel /proc/mounts; fall back to _PATH_MOUNTED (/etc/mtab). */
188 if ((fp = setmntent("/proc/mounts", "r")) == (FILE *)NULL) {
189 if ((fp = setmntent(_PATH_MOUNTED, "r")) == (FILE *)NULL) {
193 #elif defined(HAVE_HPUX_OS)
194 if ((fp = fopen(MNT_MNTTAB, "r")) == (FILE *)NULL) {
197 #elif defined(HAVE_IRIX_OS)
198 if ((fp = setmntent(MOUNTED, "r")) == (FILE *)NULL) {
201 #elif defined(HAVE_AIX_OS)
202 if ((fp = setmntent(MNTTAB, "r")) == (FILE *)NULL) {
/* Iterate all entries; entries whose mountpoint cannot be stat()ed are skipped. */
207 while ((mnt = getmntent(fp)) != (struct mntent *)NULL) {
208 if (skip_fstype(mnt->mnt_type)) {
212 if (stat(mnt->mnt_dir, &st) < 0) {
216 add_mntent_mapping(st.st_dev, mnt->mnt_fsname, mnt->mnt_dir, mnt->mnt_type, mnt->mnt_opts);
/* Solaris variant: getmntent(3C) fills a caller-provided struct mnttab. */
220 #elif defined(HAVE_SUN_OS)
223 if ((fp = fopen(MNTTAB, "r")) == (FILE *)NULL)
226 while (getmntent(fp, &mnt) == 0) {
227 if (skip_fstype(mnt.mnt_fstype)) {
231 if (stat(mnt.mnt_mountp, &st) < 0) {
235 add_mntent_mapping(st.st_dev, mnt.mnt_special, mnt.mnt_mountp, mnt.mnt_fstype, mnt.mnt_mntopts);
239 #endif /* HAVE_SUN_OS */
/* BSD-style getmntinfo(3): one call returns the whole mount array. */
240 #elif defined(HAVE_GETMNTINFO)
243 #if defined(HAVE_NETBSD_OS)
/* NetBSD switched from statfs to statvfs for getmntinfo. */
244 struct statvfs *mntinfo;
246 struct statfs *mntinfo;
248 #if defined(ST_NOWAIT)
/* NOWAIT: use cached statistics rather than querying each filesystem. */
249 int flags = ST_NOWAIT;
250 #elif defined(MNT_NOWAIT)
251 int flags = MNT_NOWAIT;
256 if ((cnt = getmntinfo(&mntinfo, flags)) > 0) {
258 if (!skip_fstype(mntinfo->f_fstypename) &&
259 stat(mntinfo->f_mntonname, &st) == 0) {
260 add_mntent_mapping(st.st_dev,
261 mntinfo->f_mntfromname,
262 mntinfo->f_mntonname,
263 mntinfo->f_fstypename,
/* AIX variant: mntctl(MCTL_QUERY) returns packed struct vmount records. */
270 #elif defined(HAVE_AIX_OS)
272 char *entries, *current;
/* First call with a size-only buffer yields the required buffer size. */
278 if (mntctl(MCTL_QUERY, sizeof(bufsize), (struct vmount *)&bufsize) != 0) {
282 entries = malloc(bufsize);
283 if ((n_entries = mntctl(MCTL_QUERY, bufsize, (struct vmount *) entries)) < 0) {
290 while (cnt < n_entries) {
291 vmp = (struct vmount *)current;
/* NOTE(review): 've' is dereferenced here (line 293) but the only visible
 * assignment is getvfsbytype() at line 301, later in the loop. Lines 294-300
 * are elided in this listing -- verify 've' is assigned before this use,
 * otherwise this reads an uninitialized/stale pointer. */
293 if (skip_fstype(ve->vfsent_name)) {
/* VMT_STUB offset addresses the mountpoint string within the vmount record. */
297 if (stat(current + vmp->vmt_data[VMT_STUB].vmt_off, &st) < 0) {
301 ve = getvfsbytype(vmp->vmt_gfstype);
302 if (ve && ve->vfsent_name) {
303 add_mntent_mapping(st.st_dev,
304 current + vmp->vmt_data[VMT_OBJECT].vmt_off,
305 current + vmp->vmt_data[VMT_STUB].vmt_off,
307 current + vmp->vmt_data[VMT_ARGS].vmt_off);
/* Records are variable length; advance by vmt_length to the next one. */
309 current = current + vmp->vmt_length;
/* OSF1/Tru64 variant: getfsstat() sizing call, then fill a malloc'ed array. */
313 #elif defined(HAVE_OSF1_OS)
314 struct statfs *entries, *current;
319 if ((n_entries = getfsstat((struct statfs *)0, 0L, MNT_NOWAIT)) < 0) {
/* +1 slack entry in case a filesystem is mounted between the two calls. */
323 size = (n_entries + 1) * sizeof(struct statfs);
324 entries = malloc(size);
326 if ((n_entries = getfsstat(entries, size, MNT_NOWAIT)) < 0) {
333 while (cnt < n_entries) {
334 if (skip_fstype(current->f_fstypename)) {
338 if (stat(current->f_mntonname, &st) < 0) {
341 add_mntent_mapping(st.st_dev,
342 current->f_mntfromname,
343 current->f_mntonname,
344 current->f_fstypename,
354 * Clear the cache (either by flushing it or by initializing it.)
355 * This function should be called with a write lock on the mntent_cache.
/*
 * Two paths: first call allocates and initializes the hashtable; subsequent
 * calls destroy the current contents and re-init it, and drop the one-entry
 * previous_cache_hit memo. NOTE(review): the if/else brace lines are elided
 * in this listing -- the destroy+init path presumably sits in the else branch.
 */
357 static void clear_mount_cache()
/* Dummy entry pointer used only so htable::init can compute the link offset. */
359 mntent_cache_entry_t *mce = NULL;
361 if (!mntent_cache_entry_hashtable) {
363 * Initialize the hash table.
365 mntent_cache_entry_hashtable = (htable *)malloc(sizeof(htable));
366 mntent_cache_entry_hashtable->init(mce, &mce->link,
367 NR_MNTENT_CACHE_ENTRIES,
368 NR_MNTENT_HTABLE_PAGES);
371 * Clear the previous_cache_hit.
/* The memoized pointer would dangle after destroy(); reset it first. */
373 previous_cache_hit = NULL;
376 * Destroy the current content and (re)initialize the hashtable.
378 mntent_cache_entry_hashtable->destroy();
379 mntent_cache_entry_hashtable->init(mce, &mce->link,
380 NR_MNTENT_CACHE_ENTRIES,
381 NR_MNTENT_HTABLE_PAGES);
386 * Initialize the cache for use.
387 * This function should be called with a write lock on the mntent_cache.
/* Empties (or first-time creates) the cache, then repopulates it from the OS
 * mount table. NOTE(review): the clear_mount_cache() call itself is among the
 * elided lines (393-398) in this listing. */
389 static void initialize_mntent_cache(void)
392 * Make sure the cache is empty (either by flushing it or by initializing it.)
399 refresh_mount_cache();
403 * Flush the current content from the cache.
/*
 * Public entry point: under the cache mutex, drop the memoized hit, destroy
 * the hashtable and NULL it so the next find_mntent_mapping() re-initializes
 * lazily. NOTE(review): the htable object malloc'ed in clear_mount_cache()
 * is not visibly free()d here (lines 416-417 elided) -- verify, or destroy()
 * releases it.
 */
405 void flush_mntent_cache(void)
410 P(mntent_cache_lock);
412 if (mntent_cache_entry_hashtable) {
413 previous_cache_hit = NULL;
414 mntent_cache_entry_hashtable->destroy();
415 mntent_cache_entry_hashtable = NULL;
418 V(mntent_cache_lock);
422 * Find a mapping in the cache.
/*
 * Public entry point: map a device id to its cached mount entry, refreshing
 * the cache lazily, periodically, and on lookup miss. Returns the entry (the
 * final 'return mce;' runs past this listing). Thread-safe via the cache mutex.
 */
424 mntent_cache_entry_t *find_mntent_mapping(uint32_t dev)
426 mntent_cache_entry_t *mce = NULL;
432 P(mntent_cache_lock);
435 * Shortcut when we get a request for the same device again.
/* Fast path: one-entry memo avoids the hashtable lookup entirely. */
437 if (previous_cache_hit && previous_cache_hit->dev == dev) {
438 mce = previous_cache_hit;
443 * Initialize the cache if that was not done before.
445 if (!mntent_cache_entry_hashtable) {
446 initialize_mntent_cache();
447 last_rescan = time(NULL);
450 * We rescan the mountlist when called when more then
451 * MNTENT_RESCAN_INTERVAL seconds have past since the
452 * last rescan. This way we never work with data older
453 * then MNTENT_RESCAN_INTERVAL seconds.
/* Periodic refresh so new/removed mounts are picked up eventually.
 * NOTE(review): no visible update of last_rescan on this path (line 458
 * elided) -- verify it is reset after the rescan. */
456 if ((now - last_rescan) > MNTENT_RESCAN_INTERVAL) {
457 initialize_mntent_cache();
461 mce = (mntent_cache_entry_t *)mntent_cache_entry_hashtable->lookup(dev);
464 * If we fail to lookup the mountpoint its probably a mountpoint added
465 * after we did our initial scan. Lets rescan the mountlist and try
/* Miss path: rebuild the cache once and retry, to catch freshly added mounts. */
469 initialize_mntent_cache();
470 mce = (mntent_cache_entry_t *)mntent_cache_entry_hashtable->lookup(dev);
474 * Store the last successfull lookup as the previous_cache_hit.
477 previous_cache_hit = mce;
481 V(mntent_cache_lock);