2 Bacula® - The Network Backup Solution
4 Copyright (C) 2009-2010 Free Software Foundation Europe e.V.
6 The main author of Bacula is Kern Sibbald, with contributions from
7 many others, a complete list can be found in the file AUTHORS.
8 This program is Free Software; you can redistribute it and/or
9 modify it under the terms of version three of the GNU Affero General Public
10 License as published by the Free Software Foundation and included
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU Affero General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23 Bacula® is a registered trademark of Kern Sibbald.
24 The licensor of Bacula is the Free Software Foundation Europe
25 (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
26 Switzerland, email:ftf@fsfeurope.org.
 * This code implements a cache of the currently mounted filesystems, for which
 * it uses the mostly in-kernel mount information, and exports the different OS
 * specific interfaces through one generic interface. We use a hashed cache which is
 * accessed using a hash of the device id, and we keep the previous cache hit, as
 * most of the time we get called quite a lot with the same
 * device, so keeping the previous cache hit gives a very optimized code path.
 *
 * This interface is implemented for the following OSes:
 *
 * Currently we only use this code for Linux- and OSF1-based fstype determination.
 * For the other OSes we can use the fstype present in the stat structure on those OSes.
 *
 * This code replaces the big switch we used before, based on the SUPER_MAGIC present in
 * the statfs(2) structure, which needed extra code for each new filesystem added to
 * the OS — and for Linux that tends to happen often, as it has quite a few different filesystems.
 * This new implementation should eliminate that, as we use the Linux /proc/mounts in-kernel
 * data, which automatically covers any new filesystem when it is added to the kernel.
 *
 * Written by Marco van Wieringen, August 2009
62 #include "mntent_cache.h"
67 #include <sys/types.h>
70 #if defined(HAVE_GETMNTENT)
71 #if defined(HAVE_LINUX_OS) || defined(HAVE_HPUX_OS) || defined(HAVE_AIX_OS)
73 #elif defined(HAVE_SUN_OS)
74 #include <sys/mnttab.h>
75 #endif /* HAVE_GETMNTENT */
76 #elif defined(HAVE_GETMNTINFO)
77 #if defined(HAVE_OPENBSD_OS)
78 #include <sys/param.h>
79 #include <sys/mount.h>
80 #elif defined(HAVE_NETBSD_OS)
81 #include <sys/types.h>
82 #include <sys/statvfs.h>
84 #include <sys/param.h>
85 #include <sys/ucred.h>
86 #include <sys/mount.h>
88 #elif defined(HAVE_AIX_OS)
91 #elif defined(HAVE_OSF1_OS)
92 #include <sys/mount.h>
95 static char cache_initialized = 0;
98 * Protected data by mutex lock.
100 static pthread_mutex_t mntent_cache_lock = PTHREAD_MUTEX_INITIALIZER;
101 static mntent_cache_entry_t *mntent_cache_entry_hashtable[NR_MNTENT_CACHE_ENTRIES];
102 static mntent_cache_entry_t *previous_cache_hit = NULL;
105 * Simple hash function.
107 static uint32_t mntent_hash_function(uint32_t dev)
109 return (dev % NR_MNTENT_CACHE_ENTRIES);
113 * Add a new entry to the cache.
114 * This function should be called with a write lock on the mntent_cache.
116 static void add_mntent_mapping(uint32_t dev, const char *special, const char *mountpoint,
117 const char *fstype, const char *mntopts)
120 mntent_cache_entry_t *mce;
123 * Select the correct hash bucket.
125 hash = mntent_hash_function(dev);
128 * See if this is the first being put into the hash bucket.
130 if (mntent_cache_entry_hashtable[hash] == (mntent_cache_entry_t *)NULL) {
131 mce = (mntent_cache_entry_t *)malloc(sizeof(mntent_cache_entry_t));
132 memset((caddr_t)mce, 0, sizeof(mntent_cache_entry_t));
133 mntent_cache_entry_hashtable[hash] = mce;
136 * Walk the linked list in the hash bucket.
138 for (mce = mntent_cache_entry_hashtable[hash]; mce->next != NULL; mce = mce->next) ;
139 mce->next = (mntent_cache_entry_t *)malloc(sizeof(mntent_cache_entry_t));
141 memset((caddr_t)mce, 0, sizeof(mntent_cache_entry_t));
145 mce->special = bstrdup(special);
146 mce->mountpoint = bstrdup(mountpoint);
147 mce->fstype = bstrdup(fstype);
149 mce->mntopts = bstrdup(mntopts);
/*
 * OS specific function to load the different mntents into the cache.
 * This function should be called with a write lock on the mntent_cache.
 */
static void refresh_mount_cache(void)
{
#if defined(HAVE_GETMNTENT)
   FILE *fp;
   struct stat st;
#if defined(HAVE_LINUX_OS) || defined(HAVE_HPUX_OS) || defined(HAVE_IRIX_OS) || defined(HAVE_AIX_OS)
   struct mntent *mnt;

#if defined(HAVE_LINUX_OS)
   /*
    * Prefer the in-kernel view (/proc/mounts); fall back to the static
    * mtab when /proc is not mounted.
    */
   if ((fp = setmntent("/proc/mounts", "r")) == (FILE *)NULL) {
      if ((fp = setmntent(_PATH_MOUNTED, "r")) == (FILE *)NULL) {
         return;
      }
   }
#elif defined(HAVE_HPUX_OS)
   if ((fp = fopen(MNT_MNTTAB, "r")) == (FILE *)NULL) {
      return;
   }
#elif defined(HAVE_IRIX_OS)
   if ((fp = setmntent(MOUNTED, "r")) == (FILE *)NULL) {
      return;
   }
#elif defined(HAVE_AIX_OS)
   if ((fp = setmntent(MNTTAB, "r")) == (FILE *)NULL) {
      return;
   }
#endif

   while ((mnt = getmntent(fp)) != (struct mntent *)NULL) {
      /*
       * We need the device id of the mountpoint; skip entries we
       * cannot stat (e.g. stale or inaccessible mounts).
       */
      if (stat(mnt->mnt_dir, &st) < 0) {
         continue;
      }

      add_mntent_mapping(st.st_dev, mnt->mnt_fsname, mnt->mnt_dir, mnt->mnt_type, mnt->mnt_opts);
   }

   endmntent(fp);
#elif defined(HAVE_SUN_OS)
   struct mnttab mnt;

   if ((fp = fopen(MNTTAB, "r")) == (FILE *)NULL) {
      return;
   }

   while (getmntent(fp, &mnt) == 0) {
      if (stat(mnt.mnt_mountp, &st) < 0) {
         continue;
      }

      add_mntent_mapping(st.st_dev, mnt.mnt_special, mnt.mnt_mountp, mnt.mnt_fstype, mnt.mnt_mntopts);
   }

   fclose(fp);
#endif /* HAVE_SUN_OS */
#elif defined(HAVE_GETMNTINFO)
   int cnt;
   struct stat st;
#if defined(HAVE_NETBSD_OS)
   struct statvfs *mntinfo;
#else
   struct statfs *mntinfo;
#endif
#if defined(ST_NOWAIT)
   int flags = ST_NOWAIT;
#elif defined(MNT_NOWAIT)
   int flags = MNT_NOWAIT;
#else
   int flags = 0;
#endif

   /*
    * getmntinfo() returns an array the C library manages; walk it and
    * map every mountpoint we can stat. No mount options are available
    * through this interface, so pass NULL for mntopts.
    */
   if ((cnt = getmntinfo(&mntinfo, flags)) > 0) {
      while (cnt > 0) {
         if (stat(mntinfo->f_mntonname, &st) == 0) {
            add_mntent_mapping(st.st_dev,
                               mntinfo->f_mntfromname,
                               mntinfo->f_mntonname,
                               mntinfo->f_fstypename,
                               NULL);
         }
         mntinfo++;
         cnt--;
      }
   }
#elif defined(HAVE_AIX_OS)
   int bufsize;
   char *entries, *current;
   struct vmount *vmp;
   struct stat st;
   struct vfs_ent *ve;
   int n_entries, cnt;

   /*
    * First mntctl() call only asks how big a buffer we need.
    */
   if (mntctl(MCTL_QUERY, sizeof(bufsize), (struct vmount *)&bufsize) != 0) {
      return;
   }

   entries = malloc(bufsize);
   if (entries == NULL) {
      return;
   }
   if ((n_entries = mntctl(MCTL_QUERY, bufsize, (struct vmount *)entries)) < 0) {
      free(entries);
      return;
   }

   cnt = 0;
   current = entries;
   while (cnt < n_entries) {
      vmp = (struct vmount *)current;

      /*
       * Use a positive stat() test so the pointer advance below always
       * runs, even for entries we skip.
       */
      if (stat(current + vmp->vmt_data[VMT_STUB].vmt_off, &st) == 0) {
         ve = getvfsbytype(vmp->vmt_gfstype);
         if (ve && ve->vfsent_name) {
            add_mntent_mapping(st.st_dev,
                               current + vmp->vmt_data[VMT_OBJECT].vmt_off,
                               current + vmp->vmt_data[VMT_STUB].vmt_off,
                               ve->vfsent_name,
                               current + vmp->vmt_data[VMT_ARGS].vmt_off);
         }
      }
      current = current + vmp->vmt_length;
      cnt++;
   }
   free(entries);
#elif defined(HAVE_OSF1_OS)
   struct statfs *entries, *current;
   struct stat st;
   int n_entries;
   int cnt;
   size_t size;

   if ((n_entries = getfsstat((struct statfs *)0, 0L, MNT_NOWAIT)) < 0) {
      return;
   }

   size = (n_entries + 1) * sizeof(struct statfs);
   entries = malloc(size);
   if (entries == NULL) {
      return;
   }

   if ((n_entries = getfsstat(entries, size, MNT_NOWAIT)) < 0) {
      free(entries);
      return;
   }

   cnt = 0;
   current = entries;
   while (cnt < n_entries) {
      if (stat(current->f_mntonname, &st) == 0) {
         add_mntent_mapping(st.st_dev,
                            current->f_mntfromname,
                            current->f_mntonname,
                            current->f_fstypename,
                            NULL);
      }
      current++;
      cnt++;
   }
   free(entries);
#endif
}
315 * Clear the cache (either by flushing it or by initializing it.)
316 * This function should be called with a write lock on the mntent_cache.
318 static void clear_mount_cache()
321 mntent_cache_entry_t *mce, *mce_next;
323 if (cache_initialized == 0) {
325 * Initialize the hash table.
327 memset((caddr_t)mntent_cache_entry_hashtable, 0, NR_MNTENT_CACHE_ENTRIES * sizeof(mntent_cache_entry_t *));
328 cache_initialized = 1;
331 * Clear the previous_cache_hit.
333 previous_cache_hit = NULL;
336 * Walk all hash buckets.
338 for (hash = 0; hash < NR_MNTENT_CACHE_ENTRIES; hash++) {
340 * Walk the content of this hash bucket.
342 mce = mntent_cache_entry_hashtable[hash];
343 mntent_cache_entry_hashtable[hash] = NULL;
344 while (mce != NULL) {
346 * Save the pointer to the next entry.
348 mce_next = mce->next;
351 * Free the structure.
356 free(mce->mountpoint);
367 * Initialize the cache for use.
369 static void initialize_mntent_cache(void)
372 * Lock the cache while we update it.
374 P(mntent_cache_lock);
377 * Make sure the cache is empty (either by flushing it or by initializing it.)
384 refresh_mount_cache();
387 * We are done updating the cache.
389 V(mntent_cache_lock);
/*
 * Public entry point: warm the cache up front so the first
 * find_mntent_mapping() call does not pay the load cost.
 */
void preload_mntent_cache(void)
{
   initialize_mntent_cache();
}
397 void flush_mntent_cache(void)
400 * Lock the cache while we update it.
402 P(mntent_cache_lock);
405 * Make sure the cache is empty (either by flushing it or by initializing it.)
410 * We are done updating the cache.
412 V(mntent_cache_lock);
416 * Find a mapping in the cache.
418 mntent_cache_entry_t *find_mntent_mapping(uint32_t dev)
421 mntent_cache_entry_t *mce;
424 * Initialize the cache if that was not done before.
426 if (cache_initialized == 0) {
427 initialize_mntent_cache();
431 * Shortcut when we get a request for the same device again.
433 if (previous_cache_hit && previous_cache_hit->dev == dev) {
434 return previous_cache_hit;
438 * Lock the cache while we walk it.
440 P(mntent_cache_lock);
443 * Select the correct hash bucket.
445 hash = mntent_hash_function(dev);
448 * Walk the hash bucket.
450 for (mce = mntent_cache_entry_hashtable[hash]; mce != NULL; mce = mce->next) {
451 if (mce->dev == dev) {
452 previous_cache_hit = mce;
453 V(mntent_cache_lock);
459 * We are done walking the cache.
461 V(mntent_cache_lock);