2 Bacula® - The Network Backup Solution
4 Copyright (C) 2009-2011 Free Software Foundation Europe e.V.
6 The main author of Bacula is Kern Sibbald, with contributions from
7 many others, a complete list can be found in the file AUTHORS.
8 This program is Free Software; you can redistribute it and/or
9 modify it under the terms of version three of the GNU Affero General Public
10 License as published by the Free Software Foundation and included
13 This program is distributed in the hope that it will be useful, but
14 WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 General Public License for more details.
18 You should have received a copy of the GNU Affero General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
23 Bacula® is a registered trademark of Kern Sibbald.
24 The licensor of Bacula is the Free Software Foundation Europe
25 (FSFE), Fiduciary Program, Sumatrastrasse 25, 8006 Zürich,
26 Switzerland, email:ftf@fsfeurope.org.
 * This code implements a cache of the currently mounted filesystems, for
 * which it uses the (mostly in-kernel) mount information and exports the
 * different OS-specific interfaces through one generic interface. We use a
 * hashed cache which is accessed using a hash on the device id, and we keep
 * the previous cache hit because we tend to be called very often for the
 * same device, so keeping the previous cache hit gives a very optimized
 * code path.
37 * This interface is implemented for the following OS-es:
47 * Currently we only use this code for Linux and OSF1 based fstype determination.
48 * For the other OS-es we can use the fstype present in stat structure on those OS-es.
 * This code replaces the big switch we used before, which was based on the
 * SUPER_MAGIC value in the statfs(2) structure but needed extra code for
 * each new filesystem added to the OS; for Linux that tends to happen often,
 * as it has quite a number of different filesystems. This new implementation
 * should eliminate that, as we use the Linux /proc/mounts in-kernel data
 * which automatically includes any new filesystem when it is added to the
 * kernel.
58 * Marco van Wieringen, August 2009
62 #include "mntent_cache.h"
67 #include <sys/types.h>
70 #if defined(HAVE_GETMNTENT)
71 #if defined(HAVE_LINUX_OS) || defined(HAVE_HPUX_OS) || defined(HAVE_AIX_OS)
73 #elif defined(HAVE_SUN_OS)
74 #include <sys/mnttab.h>
#endif /* HAVE_LINUX_OS || HAVE_HPUX_OS || HAVE_AIX_OS */
76 #elif defined(HAVE_GETMNTINFO)
77 #if defined(HAVE_OPENBSD_OS)
78 #include <sys/param.h>
79 #include <sys/mount.h>
80 #elif defined(HAVE_NETBSD_OS)
81 #include <sys/types.h>
82 #include <sys/statvfs.h>
84 #include <sys/param.h>
85 #include <sys/ucred.h>
86 #include <sys/mount.h>
88 #elif defined(HAVE_AIX_OS)
91 #elif defined(HAVE_OSF1_OS)
92 #include <sys/mount.h>
/*
 * Protected data by mutex lock.
 */
static pthread_mutex_t mntent_cache_lock = PTHREAD_MUTEX_INITIALIZER; /* guards the cache state below */
static mntent_cache_entry_t *previous_cache_hit = NULL; /* last entry looked up; fast path for repeated queries of the same dev */
static htable *mntent_cache_entry_hashtable = NULL;     /* dev -> mntent_cache_entry_t cache; NULL until first initialization */
103 * Add a new entry to the cache.
104 * This function should be called with a write lock on the mntent_cache.
106 static void add_mntent_mapping(uint32_t dev, const char *special, const char *mountpoint,
107 const char *fstype, const char *mntopts)
110 mntent_cache_entry_t *mce;
113 * Calculate the length of all strings so we can allocate the buffer
114 * as one big chunk of memory using the hash_malloc method.
116 len = strlen(special) + 1;
117 len += strlen(mountpoint) + 1;
118 len += strlen(fstype) + 1;
120 len += strlen(mntopts) + 1;
124 * We allocate all members of the hash entry in the same memory chunk.
126 mce = (mntent_cache_entry_t *)mntent_cache_entry_hashtable->hash_malloc(sizeof(mntent_cache_entry_t) + len);
129 mce->special = (char *)mce + sizeof(mntent_cache_entry_t);
130 strcpy(mce->special, special);
132 mce->mountpoint = mce->special + strlen(mce->special) + 1;
133 strcpy(mce->mountpoint, mountpoint);
135 mce->fstype = mce->mountpoint + strlen(mce->mountpoint) + 1;
136 strcpy(mce->fstype, fstype);
139 mce->mntopts = mce->fstype + strlen(mce->fstype) + 1;
140 strcpy(mce->mntopts, mntopts);
145 mntent_cache_entry_hashtable->insert(mce->dev, mce);
/*
 * OS specific function to load the different mntents into the cache.
 * This function should be called with a write lock on the mntent_cache.
 *
 * Entries whose mountpoint cannot be stat()-ed are skipped; everything
 * else is handed to add_mntent_mapping() keyed on st_dev.
 */
static void refresh_mount_cache(void)
{
#if defined(HAVE_GETMNTENT)
   FILE *fp;
   struct stat st;
#if defined(HAVE_LINUX_OS) || defined(HAVE_HPUX_OS) || defined(HAVE_IRIX_OS) || defined(HAVE_AIX_OS)
   struct mntent *mnt;

#if defined(HAVE_LINUX_OS)
   /*
    * Prefer the in-kernel /proc/mounts data, fall back to the mtab file.
    */
   if ((fp = setmntent("/proc/mounts", "r")) == (FILE *)NULL) {
      if ((fp = setmntent(_PATH_MOUNTED, "r")) == (FILE *)NULL) {
         return;
      }
   }
#elif defined(HAVE_HPUX_OS)
   if ((fp = fopen(MNT_MNTTAB, "r")) == (FILE *)NULL) {
      return;
   }
#elif defined(HAVE_IRIX_OS)
   if ((fp = setmntent(MOUNTED, "r")) == (FILE *)NULL) {
      return;
   }
#elif defined(HAVE_AIX_OS)
   if ((fp = setmntent(MNTTAB, "r")) == (FILE *)NULL) {
      return;
   }
#endif

   while ((mnt = getmntent(fp)) != (struct mntent *)NULL) {
      if (stat(mnt->mnt_dir, &st) < 0) {
         continue;
      }

      add_mntent_mapping(st.st_dev, mnt->mnt_fsname, mnt->mnt_dir, mnt->mnt_type, mnt->mnt_opts);
   }

   endmntent(fp);
#elif defined(HAVE_SUN_OS)
   struct mnttab mnt;

   if ((fp = fopen(MNTTAB, "r")) == (FILE *)NULL)
      return;

   while (getmntent(fp, &mnt) == 0) {
      if (stat(mnt.mnt_mountp, &st) < 0) {
         continue;
      }

      add_mntent_mapping(st.st_dev, mnt.mnt_special, mnt.mnt_mountp, mnt.mnt_fstype, mnt.mnt_mntopts);
   }

   fclose(fp);
#endif /* HAVE_SUN_OS */
#elif defined(HAVE_GETMNTINFO)
   int cnt;
   struct stat st;
#if defined(HAVE_NETBSD_OS)
   struct statvfs *mntinfo;
#else
   struct statfs *mntinfo;
#endif
#if defined(ST_NOWAIT)
   int flags = ST_NOWAIT;
#elif defined(MNT_NOWAIT)
   int flags = MNT_NOWAIT;
#else
   int flags = 0;
#endif

   if ((cnt = getmntinfo(&mntinfo, flags)) > 0) {
      while (cnt > 0) {
         if (stat(mntinfo->f_mntonname, &st) == 0) {
            add_mntent_mapping(st.st_dev,
                               mntinfo->f_mntfromname,
                               mntinfo->f_mntonname,
                               mntinfo->f_fstypename,
                               NULL);   /* no mount options string in *mntinfo */
         }
         mntinfo++;
         cnt--;
      }
   }
#elif defined(HAVE_AIX_OS)
   int bufsize;
   char *entries, *current;
   struct vmount *vmp;
   struct stat st;
   struct vfs_ent *ve;
   int n_entries, cnt;

   /*
    * First mntctl() call only queries the needed buffer size.
    */
   if (mntctl(MCTL_QUERY, sizeof(bufsize), (struct vmount *)&bufsize) != 0) {
      return;
   }

   entries = (char *)malloc(bufsize);
   if ((n_entries = mntctl(MCTL_QUERY, bufsize, (struct vmount *)entries)) < 0) {
      free(entries);
      return;
   }

   cnt = 0;
   current = entries;
   while (cnt < n_entries) {
      vmp = (struct vmount *)current;

      if (stat(current + vmp->vmt_data[VMT_STUB].vmt_off, &st) < 0) {
         /*
          * BUGFIX: advance to the next vmount entry before continuing,
          * otherwise one stale mountpoint spins this loop forever.
          */
         current = current + vmp->vmt_length;
         cnt++;
         continue;
      }

      ve = getvfsbytype(vmp->vmt_gfstype);
      if (ve && ve->vfsent_name) {
         add_mntent_mapping(st.st_dev,
                            current + vmp->vmt_data[VMT_OBJECT].vmt_off,
                            current + vmp->vmt_data[VMT_STUB].vmt_off,
                            ve->vfsent_name,
                            current + vmp->vmt_data[VMT_ARGS].vmt_off);
      }
      current = current + vmp->vmt_length;
      cnt++;
   }
   free(entries);
#elif defined(HAVE_OSF1_OS)
   struct statfs *entries, *current;
   struct stat st;
   int n_entries;
   int size;
   int cnt;

   /*
    * First getfsstat() call with a NULL buffer only counts the entries.
    */
   if ((n_entries = getfsstat((struct statfs *)0, 0L, MNT_NOWAIT)) < 0) {
      return;
   }

   size = (n_entries + 1) * sizeof(struct statfs);
   entries = (struct statfs *)malloc(size);

   if ((n_entries = getfsstat(entries, size, MNT_NOWAIT)) < 0) {
      free(entries);
      return;
   }

   cnt = 0;
   current = entries;
   while (cnt < n_entries) {
      if (stat(current->f_mntonname, &st) < 0) {
         /*
          * BUGFIX: advance before continuing to avoid an endless loop
          * on a mountpoint that cannot be stat()-ed.
          */
         current++;
         cnt++;
         continue;
      }

      add_mntent_mapping(st.st_dev,
                         current->f_mntfromname,
                         current->f_mntonname,
                         current->f_fstypename,
                         NULL);         /* no mount options string available */
      current++;
      cnt++;
   }
   free(entries);
#endif
}
310 * Clear the cache (either by flushing it or by initializing it.)
311 * This function should be called with a write lock on the mntent_cache.
313 static void clear_mount_cache()
315 mntent_cache_entry_t *mce = NULL;
317 if (!mntent_cache_entry_hashtable) {
319 * Initialize the hash table.
321 mntent_cache_entry_hashtable = (htable *)malloc(sizeof(htable));
322 mntent_cache_entry_hashtable->init(mce, &mce->link,
323 NR_MNTENT_CACHE_ENTRIES,
324 NR_MNTENT_HTABLE_PAGES);
327 * Clear the previous_cache_hit.
329 previous_cache_hit = NULL;
332 * Destroy the current content and (re)initialize the hashtable.
334 mntent_cache_entry_hashtable->destroy();
335 mntent_cache_entry_hashtable->init(mce, &mce->link,
336 NR_MNTENT_CACHE_ENTRIES,
337 NR_MNTENT_HTABLE_PAGES);
342 * Initialize the cache for use.
344 static void initialize_mntent_cache(void)
347 * Lock the cache while we update it.
349 P(mntent_cache_lock);
352 * Make sure the cache is empty (either by flushing it or by initializing it.)
359 refresh_mount_cache();
362 * We are done updating the cache.
364 V(mntent_cache_lock);
367 void preload_mntent_cache(void)
369 initialize_mntent_cache();
372 void flush_mntent_cache(void)
375 * Lock the cache while we update it.
377 P(mntent_cache_lock);
380 * Make sure the cache is empty (either by flushing it or by initializing it.)
385 * We are done updating the cache.
387 V(mntent_cache_lock);
391 * Find a mapping in the cache.
393 mntent_cache_entry_t *find_mntent_mapping(uint32_t dev)
395 mntent_cache_entry_t *mce = NULL;
398 * Initialize the cache if that was not done before.
400 if (!mntent_cache_entry_hashtable) {
401 initialize_mntent_cache();
405 * Shortcut when we get a request for the same device again.
407 if (previous_cache_hit && previous_cache_hit->dev == dev) {
408 return previous_cache_hit;
412 * Lock the cache while we walk it.
414 P(mntent_cache_lock);
416 mce = (mntent_cache_entry_t *)mntent_cache_entry_hashtable->lookup(dev);
418 previous_cache_hit = mce;
422 * We are done walking the cache.
424 V(mntent_cache_lock);