/*	$NetBSD: union_subr.c,v 1.74 2016/08/20 12:37:08 hannken Exp $	*/

/*
 * Copyright (c) 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_subr.c	8.20 (Berkeley) 5/20/95
 */

/*
 * Copyright (c) 1994 Jan-Simon Pendry
 *
 * This code is derived from software contributed to Berkeley by
 * Jan-Simon Pendry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)union_subr.c	8.20 (Berkeley) 5/20/95
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: union_subr.c,v 1.74 2016/08/20 12:37:08 hannken Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/kmem.h>	/* kmem_alloc/kmem_free in union_check_rmdir() */
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/malloc.h>
#include <sys/dirent.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/queue.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>

#include <fs/union/union.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

static LIST_HEAD(uhashhead, union_node) *uhashtbl;
static u_long uhash_mask;		/* size of hash table - 1 */
#define UNION_HASH(u, l) \
	((((u_long) (u) + (u_long) (l)) >> 8) & uhash_mask)
#define NOHASH	((u_long)-1)
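
/*
 * Illustrative sketch (no new functionality): a node's hash slot is
 * derived from the sum of its layer vnode pointers, with the low eight
 * bits discarded to compensate for allocator alignment.  Nodes are
 * inserted and looked up under uhash_lock, as in union_reinit() below:
 *
 *	val = UNION_HASH(un->un_uppervp, un->un_lowervp);
 *	LIST_INSERT_HEAD(&uhashtbl[val], un, un_cache);
 */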

static kmutex_t uhash_lock;

void union_updatevp(struct union_node *, struct vnode *, struct vnode *);
static void union_ref(struct union_node *);
static void union_rele(struct union_node *);
static int union_do_lookup(struct vnode *, struct componentname *,
    kauth_cred_t, const char *);
int union_vn_close(struct vnode *, int, kauth_cred_t, struct lwp *);
static void union_dircache_r(struct vnode *, struct vnode ***, int *);
struct vnode *union_dircache(struct vnode *, struct lwp *);

void
union_init(void)
{

	mutex_init(&uhash_lock, MUTEX_DEFAULT, IPL_NONE);
	uhashtbl = hashinit(desiredvnodes, HASH_LIST, true, &uhash_mask);
}

void
union_reinit(void)
{
	struct union_node *un;
	struct uhashhead *oldhash, *hash;
	u_long oldmask, mask, val;
	int i;

	hash = hashinit(desiredvnodes, HASH_LIST, true, &mask);
	mutex_enter(&uhash_lock);
	oldhash = uhashtbl;
	oldmask = uhash_mask;
	uhashtbl = hash;
	uhash_mask = mask;
	for (i = 0; i <= oldmask; i++) {
		while ((un = LIST_FIRST(&oldhash[i])) != NULL) {
			LIST_REMOVE(un, un_cache);
			val = UNION_HASH(un->un_uppervp, un->un_lowervp);
			LIST_INSERT_HEAD(&hash[val], un, un_cache);
		}
	}
	mutex_exit(&uhash_lock);
	hashdone(oldhash, HASH_LIST, oldmask);
}

/*
 * Free global unionfs resources.
 */
void
union_done(void)
{

	hashdone(uhashtbl, HASH_LIST, uhash_mask);
	mutex_destroy(&uhash_lock);

	/* Make sure to unset the readdir hook. */
	vn_union_readdir_hook = NULL;
}

void
union_updatevp(struct union_node *un, struct vnode *uppervp,
	struct vnode *lowervp)
{
	int ohash = UNION_HASH(un->un_uppervp, un->un_lowervp);
	int nhash = UNION_HASH(uppervp, lowervp);
	int docache = (lowervp != NULLVP || uppervp != NULLVP);
	bool un_unlock;

	KASSERT(VOP_ISLOCKED(UNIONTOV(un)) == LK_EXCLUSIVE);

	mutex_enter(&uhash_lock);

	if (!docache || ohash != nhash) {
		if (un->un_cflags & UN_CACHED) {
			un->un_cflags &= ~UN_CACHED;
			LIST_REMOVE(un, un_cache);
		}
	}

	if (un->un_lowervp != lowervp) {
		if (un->un_lowervp) {
			vrele(un->un_lowervp);
			if (un->un_path) {
				free(un->un_path, M_TEMP);
				un->un_path = 0;
			}
			if (un->un_dirvp) {
				vrele(un->un_dirvp);
				un->un_dirvp = NULLVP;
			}
		}
		un->un_lowervp = lowervp;
		mutex_enter(&un->un_lock);
		un->un_lowersz = VNOVAL;
		mutex_exit(&un->un_lock);
	}

	if (un->un_uppervp != uppervp) {
		if (un->un_uppervp) {
			un_unlock = false;
			vrele(un->un_uppervp);
		} else
			un_unlock = true;

		mutex_enter(&un->un_lock);
		un->un_uppervp = uppervp;
		mutex_exit(&un->un_lock);
		if (un_unlock) {
			struct vop_unlock_args ap;

			ap.a_vp = UNIONTOV(un);
			genfs_unlock(&ap);
		}
		mutex_enter(&un->un_lock);
		un->un_uppersz = VNOVAL;
		mutex_exit(&un->un_lock);
		/* Update union vnode interlock. */
		if (uppervp != NULL) {
			mutex_obj_hold(uppervp->v_interlock);
			uvm_obj_setlock(&UNIONTOV(un)->v_uobj,
			    uppervp->v_interlock);
		}
	}

	if (docache && (ohash != nhash)) {
		LIST_INSERT_HEAD(&uhashtbl[nhash], un, un_cache);
		un->un_cflags |= UN_CACHED;
	}

	mutex_exit(&uhash_lock);
}

void
union_newlower(struct union_node *un, struct vnode *lowervp)
{

	union_updatevp(un, un->un_uppervp, lowervp);
}

void
union_newupper(struct union_node *un, struct vnode *uppervp)
{

	union_updatevp(un, uppervp, un->un_lowervp);
}

/*
 * Keep track of size changes in the underlying vnodes.
 * If the size changes, then call back to the vm layer
 * giving priority to the upper layer size.
 *
 * The mutex un_lock is held on entry and released on return.
 */
void
union_newsize(struct vnode *vp, off_t uppersz, off_t lowersz)
{
	struct union_node *un = VTOUNION(vp);
	off_t sz;

	KASSERT(mutex_owned(&un->un_lock));
	/* only interested in regular files */
	if (vp->v_type != VREG) {
		mutex_exit(&un->un_lock);
		uvm_vnp_setsize(vp, 0);
		return;
	}

	sz = VNOVAL;

	if ((uppersz != VNOVAL) && (un->un_uppersz != uppersz)) {
		un->un_uppersz = uppersz;
		if (sz == VNOVAL)
			sz = un->un_uppersz;
	}

	if ((lowersz != VNOVAL) && (un->un_lowersz != lowersz)) {
		un->un_lowersz = lowersz;
		if (sz == VNOVAL)
			sz = un->un_lowersz;
	}
	mutex_exit(&un->un_lock);

	if (sz != VNOVAL) {
#ifdef UNION_DIAGNOSTIC
		printf("union: %s size now %qd\n",
		    uppersz != VNOVAL ? "upper" : "lower", sz);
#endif
		uvm_vnp_setsize(vp, sz);
	}
}
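
/*
 * Usage sketch: callers take un_lock themselves and union_newsize()
 * releases it, as in union_loadvnode() below:
 *
 *	mutex_enter(&un->un_lock);
 *	union_newsize(vp, uppersz, lowersz);
 */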

static void
union_ref(struct union_node *un)
{

	KASSERT(mutex_owned(&uhash_lock));
	un->un_refs++;
}

static void
union_rele(struct union_node *un)
{

	mutex_enter(&uhash_lock);
	un->un_refs--;
	if (un->un_refs > 0) {
		mutex_exit(&uhash_lock);
		return;
	}
	if (un->un_cflags & UN_CACHED) {
		un->un_cflags &= ~UN_CACHED;
		LIST_REMOVE(un, un_cache);
	}
	mutex_exit(&uhash_lock);

	if (un->un_pvp != NULLVP)
		vrele(un->un_pvp);
	if (un->un_uppervp != NULLVP)
		vrele(un->un_uppervp);
	if (un->un_lowervp != NULLVP)
		vrele(un->un_lowervp);
	if (un->un_dirvp != NULLVP)
		vrele(un->un_dirvp);
	if (un->un_path)
		free(un->un_path, M_TEMP);
	mutex_destroy(&un->un_lock);

	free(un, M_TEMP);
}

/*
 * allocate a union_node/vnode pair.  the vnode is
 * referenced and unlocked.  the new vnode is returned
 * via (vpp).  (mp) is the mountpoint of the union filesystem,
 * (dvp) is the parent directory where the upper layer object
 * should exist (but doesn't) and (cnp) is the componentname
 * information which is partially copied to allow the upper
 * layer object to be created at a later time.  (uppervp)
 * and (lowervp) reference the upper and lower layer objects
 * being mapped.  either, but not both, can be nil.
 * both, if supplied, are unlocked.
 * the references are either maintained in the newly allocated
 * union_node object, or vrele'd.
 *
 * all union_nodes are maintained on a hash
 * list.  new nodes are only allocated when they cannot
 * be found on this list.  entries on the list are
 * removed when the vfs reclaim entry is called.
 *
 * the vnode gets attached or referenced with vcache_get().
 */
int
union_allocvp(
	struct vnode **vpp,
	struct mount *mp,
	struct vnode *undvp,		/* parent union vnode */
	struct vnode *dvp,		/* may be null */
	struct componentname *cnp,	/* may be null */
	struct vnode *uppervp,		/* may be null */
	struct vnode *lowervp,		/* may be null */
	int docache)
{
	int error;
	struct union_node *un = NULL, *un1;
	struct vnode *vp, *xlowervp = NULLVP;
	u_long hash[3];
	int try;
	bool is_dotdot;

	is_dotdot = (dvp != NULL && cnp != NULL && (cnp->cn_flags & ISDOTDOT));

	if (uppervp == NULLVP && lowervp == NULLVP)
		panic("union: unidentifiable allocation");

	if (uppervp && lowervp && (uppervp->v_type != lowervp->v_type)) {
		xlowervp = lowervp;
		lowervp = NULLVP;
	}

	if (!docache) {
		un = NULL;
		goto found;
	}

	/*
	 * If both uppervp and lowervp are not NULL we have to
	 * search union nodes with one vnode as NULL too.
	 */
	hash[0] = UNION_HASH(uppervp, lowervp);
	if (uppervp == NULL || lowervp == NULL) {
		hash[1] = hash[2] = NOHASH;
	} else {
		hash[1] = UNION_HASH(uppervp, NULLVP);
		hash[2] = UNION_HASH(NULLVP, lowervp);
	}

loop:
	mutex_enter(&uhash_lock);

	for (try = 0; try < 3; try++) {
		if (hash[try] == NOHASH)
			continue;
		LIST_FOREACH(un, &uhashtbl[hash[try]], un_cache) {
			if ((un->un_lowervp && un->un_lowervp != lowervp) ||
			    (un->un_uppervp && un->un_uppervp != uppervp) ||
			    un->un_mount != mp)
				continue;

			union_ref(un);
			mutex_exit(&uhash_lock);
			error = vcache_get(mp, &un, sizeof(un), &vp);
			KASSERT(error != 0 || UNIONTOV(un) == vp);
			union_rele(un);
			if (error == ENOENT)
				goto loop;
			else if (error)
				goto out;
			goto found;
		}
	}

	mutex_exit(&uhash_lock);

found:
	if (un) {
		if (uppervp != dvp) {
			if (is_dotdot)
				VOP_UNLOCK(dvp);
			vn_lock(UNIONTOV(un), LK_EXCLUSIVE | LK_RETRY);
			if (is_dotdot)
				vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		}
		/*
		 * Save information about the upper layer.
		 */
		if (uppervp != un->un_uppervp) {
			union_newupper(un, uppervp);
		} else if (uppervp) {
			vrele(uppervp);
		}

		/*
		 * Save information about the lower layer.
		 * This needs to keep track of pathname
		 * and directory information which union_vn_create
		 * might need.
		 */
		if (lowervp != un->un_lowervp) {
			union_newlower(un, lowervp);
			if (cnp && (lowervp != NULLVP)) {
				un->un_path = malloc(cnp->cn_namelen+1,
				    M_TEMP, M_WAITOK);
				memcpy(un->un_path, cnp->cn_nameptr,
				    cnp->cn_namelen);
				un->un_path[cnp->cn_namelen] = '\0';
				vref(dvp);
				un->un_dirvp = dvp;
			}
		} else if (lowervp) {
			vrele(lowervp);
		}
		*vpp = UNIONTOV(un);
		if (uppervp != dvp)
			VOP_UNLOCK(*vpp);
		error = 0;
		goto out;
	}

	un = malloc(sizeof(struct union_node), M_TEMP, M_WAITOK);
	mutex_init(&un->un_lock, MUTEX_DEFAULT, IPL_NONE);
	un->un_refs = 1;
	un->un_mount = mp;
	un->un_vnode = NULL;
	un->un_uppervp = uppervp;
	un->un_lowervp = lowervp;
	un->un_pvp = undvp;
	if (undvp != NULLVP)
		vref(undvp);
	un->un_dircache = 0;
	un->un_openl = 0;
	un->un_cflags = 0;

	un->un_uppersz = VNOVAL;
	un->un_lowersz = VNOVAL;

	if (dvp && cnp && (lowervp != NULLVP)) {
		un->un_path = malloc(cnp->cn_namelen+1, M_TEMP, M_WAITOK);
		memcpy(un->un_path, cnp->cn_nameptr, cnp->cn_namelen);
		un->un_path[cnp->cn_namelen] = '\0';
		vref(dvp);
		un->un_dirvp = dvp;
	} else {
		un->un_path = 0;
		un->un_dirvp = 0;
	}

	if (docache) {
		mutex_enter(&uhash_lock);
		LIST_FOREACH(un1, &uhashtbl[hash[0]], un_cache) {
			if (un1->un_lowervp == lowervp &&
			    un1->un_uppervp == uppervp &&
			    un1->un_mount == mp) {
				/*
				 * Another thread beat us, push back freshly
				 * allocated node and retry.
				 */
				mutex_exit(&uhash_lock);
				union_rele(un);
				goto loop;
			}
		}
		LIST_INSERT_HEAD(&uhashtbl[hash[0]], un, un_cache);
		un->un_cflags |= UN_CACHED;
		mutex_exit(&uhash_lock);
	}

	error = vcache_get(mp, &un, sizeof(un), vpp);
	KASSERT(error != 0 || UNIONTOV(un) == *vpp);
	union_rele(un);
	if (error == ENOENT)
		goto loop;

out:
	if (xlowervp)
		vrele(xlowervp);

	return error;
}
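
/*
 * Usage sketch (hedged; adapted from union_dircache() below): wrap an
 * upper-layer directory in a fresh, uncached union vnode.  The layer
 * vnode passed in must be referenced; the result comes back in (nvp),
 * referenced and unlocked:
 *
 *	vref(uppervp);
 *	error = union_allocvp(&nvp, mp, NULLVP, NULLVP, NULL,
 *	    uppervp, NULLVP, 0);
 */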

int
union_freevp(struct vnode *vp)
{
	struct union_node *un = VTOUNION(vp);

	/* Detach vnode from union node. */
	un->un_vnode = NULL;
	un->un_uppersz = VNOVAL;
	un->un_lowersz = VNOVAL;

	/* Detach union node from vnode. */
	mutex_enter(vp->v_interlock);
	vp->v_data = NULL;
	mutex_exit(vp->v_interlock);

	union_rele(un);

	return 0;
}

int
union_loadvnode(struct mount *mp, struct vnode *vp,
    const void *key, size_t key_len, const void **new_key)
{
	struct vattr va;
	struct vnode *svp;
	struct union_node *un;
	struct union_mount *um;
	voff_t uppersz, lowersz;

	KASSERT(key_len == sizeof(un));
	memcpy(&un, key, key_len);

	um = MOUNTTOUNIONMOUNT(mp);
	svp = (un->un_uppervp != NULLVP) ? un->un_uppervp : un->un_lowervp;

	vp->v_tag = VT_UNION;
	vp->v_op = union_vnodeop_p;
	vp->v_data = un;
	un->un_vnode = vp;

	vp->v_type = svp->v_type;
	if (svp->v_type == VCHR || svp->v_type == VBLK)
		spec_node_init(vp, svp->v_rdev);

	mutex_obj_hold(svp->v_interlock);
	uvm_obj_setlock(&vp->v_uobj, svp->v_interlock);

	/* detect the root vnode (and aliases) */
	if ((un->un_uppervp == um->um_uppervp) &&
	    ((un->un_lowervp == NULLVP) || un->un_lowervp == um->um_lowervp)) {
		if (un->un_lowervp == NULLVP) {
			un->un_lowervp = um->um_lowervp;
			if (un->un_lowervp != NULLVP)
				vref(un->un_lowervp);
		}
		vp->v_vflag |= VV_ROOT;
	}

	uppersz = lowersz = VNOVAL;
	if (un->un_uppervp != NULLVP) {
		if (vn_lock(un->un_uppervp, LK_SHARED) == 0) {
			if (VOP_GETATTR(un->un_uppervp, &va, FSCRED) == 0)
				uppersz = va.va_size;
			VOP_UNLOCK(un->un_uppervp);
		}
	}
	if (un->un_lowervp != NULLVP) {
		if (vn_lock(un->un_lowervp, LK_SHARED) == 0) {
			if (VOP_GETATTR(un->un_lowervp, &va, FSCRED) == 0)
				lowersz = va.va_size;
			VOP_UNLOCK(un->un_lowervp);
		}
	}

	mutex_enter(&un->un_lock);
	union_newsize(vp, uppersz, lowersz);

	mutex_enter(&uhash_lock);
	union_ref(un);
	mutex_exit(&uhash_lock);

	*new_key = &vp->v_data;

	return 0;
}

/*
 * copyfile.  copy the vnode (fvp) to the vnode (tvp)
 * using a sequence of reads and writes.  both (fvp)
 * and (tvp) are locked on entry and exit.
 */
int
union_copyfile(struct vnode *fvp, struct vnode *tvp, kauth_cred_t cred,
	struct lwp *l)
{
	char *tbuf;
	struct uio uio;
	struct iovec iov;
	int error = 0;

	/*
	 * strategy:
	 * allocate a buffer of size MAXBSIZE.
	 * loop doing reads and writes, keeping track
	 * of the current uio offset.
	 * give up at the first sign of trouble.
	 */

	uio.uio_offset = 0;
	UIO_SETUP_SYSSPACE(&uio);

	tbuf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);

	/* ugly loop follows... */
	do {
		off_t offset = uio.uio_offset;

		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		iov.iov_base = tbuf;
		iov.iov_len = MAXBSIZE;
		uio.uio_resid = iov.iov_len;
		uio.uio_rw = UIO_READ;
		error = VOP_READ(fvp, &uio, 0, cred);

		if (error == 0) {
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			iov.iov_base = tbuf;
			iov.iov_len = MAXBSIZE - uio.uio_resid;
			uio.uio_offset = offset;
			uio.uio_rw = UIO_WRITE;
			uio.uio_resid = iov.iov_len;

			if (uio.uio_resid == 0)
				break;

			do {
				error = VOP_WRITE(tvp, &uio, 0, cred);
			} while ((uio.uio_resid > 0) && (error == 0));
		}

	} while (error == 0);

	free(tbuf, M_TEMP);
	return (error);
}

/*
 * (un) is assumed to be locked on entry and remains
 * locked on exit.
 */
int
union_copyup(struct union_node *un, int docopy, kauth_cred_t cred,
	struct lwp *l)
{
	int error;
	struct vnode *lvp, *uvp;
	struct vattr lvattr, uvattr;

	error = union_vn_create(&uvp, un, l);
	if (error)
		return (error);

	KASSERT(VOP_ISLOCKED(uvp) == LK_EXCLUSIVE);
	union_newupper(un, uvp);

	lvp = un->un_lowervp;

	if (docopy) {
		/*
		 * XXX - should not ignore errors
		 * from VOP_CLOSE
		 */
		vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY);

		error = VOP_GETATTR(lvp, &lvattr, cred);
		if (error == 0)
			error = VOP_OPEN(lvp, FREAD, cred);
		if (error == 0) {
			error = union_copyfile(lvp, uvp, cred, l);
			(void) VOP_CLOSE(lvp, FREAD, cred);
		}
		if (error == 0) {
			/* Copy permissions up too */
			vattr_null(&uvattr);
			uvattr.va_mode = lvattr.va_mode;
			uvattr.va_flags = lvattr.va_flags;
			error = VOP_SETATTR(uvp, &uvattr, cred);
		}
		VOP_UNLOCK(lvp);
#ifdef UNION_DIAGNOSTIC
		if (error == 0)
			uprintf("union: copied up %s\n", un->un_path);
#endif

	}
	union_vn_close(uvp, FWRITE, cred, l);

	/*
	 * Subsequent IOs will go to the top layer, so
	 * call close on the lower vnode and open on the
	 * upper vnode to ensure that the filesystem keeps
	 * its reference counts right.  This doesn't do
	 * the right thing with (cred) and (FREAD) though.
	 * Ignoring error returns is not right, either.
	 */
	if (error == 0) {
		int i;

		vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY);
		for (i = 0; i < un->un_openl; i++) {
			(void) VOP_CLOSE(lvp, FREAD, cred);
			(void) VOP_OPEN(uvp, FREAD, cred);
		}
		un->un_openl = 0;
		VOP_UNLOCK(lvp);
	}

	return (error);
}

/*
 * Prepare the creation of a new node in the upper layer.
 *
 * (dvp) is the directory in which to create the new node.
 * it is locked on entry and exit.
 * (cnp) is the componentname to be created.
 * (cred) and (path) are the credentials and path used to fill (cnp).
 */
static int
union_do_lookup(struct vnode *dvp, struct componentname *cnp, kauth_cred_t cred,
    const char *path)
{
	int error;
	struct vnode *vp;

	cnp->cn_nameiop = CREATE;
	cnp->cn_flags = LOCKPARENT | ISLASTCN;
	cnp->cn_cred = cred;
	cnp->cn_nameptr = path;
	cnp->cn_namelen = strlen(path);

	error = VOP_LOOKUP(dvp, &vp, cnp);

	if (error == 0) {
		KASSERT(vp != NULL);
		VOP_ABORTOP(dvp, cnp);
		vrele(vp);
		error = EEXIST;
	} else if (error == EJUSTRETURN) {
		error = 0;
	}

	return error;
}

/*
 * Create a shadow directory in the upper layer.
 * The new vnode is returned locked.
 *
 * (um) points to the union mount structure for access to the
 * mounting process's credentials.
 * (dvp) is the directory in which to create the shadow directory.
 * it is unlocked on entry and exit.
 * (cnp) is the componentname to be created.
 * (vpp) is the returned newly created shadow directory, which
 * is returned locked.
 *
 * N.B. We still attempt to create shadow directories even if the union
 * is mounted read-only, which is a little nonintuitive.
 */
int
union_mkshadow(struct union_mount *um, struct vnode *dvp,
	struct componentname *cnp, struct vnode **vpp)
{
	int error;
	struct vattr va;
	struct componentname cn;
	char *pnbuf;

	if (cnp->cn_namelen + 1 > MAXPATHLEN)
		return ENAMETOOLONG;
	pnbuf = PNBUF_GET();
	memcpy(pnbuf, cnp->cn_nameptr, cnp->cn_namelen);
	pnbuf[cnp->cn_namelen] = '\0';

	vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);

	error = union_do_lookup(dvp, &cn,
	    (um->um_op == UNMNT_ABOVE ? cnp->cn_cred : um->um_cred), pnbuf);
	if (error) {
		VOP_UNLOCK(dvp);
		PNBUF_PUT(pnbuf);
		return error;
	}

	/*
	 * policy: when creating the shadow directory in the
	 * upper layer, create it owned by the user who did
	 * the mount, group from parent directory, and mode
	 * 777 modified by umask (ie mostly identical to the
	 * mkdir syscall).  (jsp, kb)
	 */

	vattr_null(&va);
	va.va_type = VDIR;
	va.va_mode = um->um_cmode;

	KASSERT(*vpp == NULL);
	error = VOP_MKDIR(dvp, vpp, &cn, &va);
	VOP_UNLOCK(dvp);
	PNBUF_PUT(pnbuf);
	return error;
}

/*
 * Create a whiteout entry in the upper layer.
 *
 * (um) points to the union mount structure for access to the
 * mounting process's credentials.
 * (dvp) is the directory in which to create the whiteout.
 * it is locked on entry and exit.
 * (cnp) is the componentname to be created.
 * (un) holds the path to be created.
 */
int
union_mkwhiteout(struct union_mount *um, struct vnode *dvp,
	struct componentname *cnp, struct union_node *un)
{
	int error;
	struct componentname cn;

	error = union_do_lookup(dvp, &cn,
	    (um->um_op == UNMNT_ABOVE ? cnp->cn_cred : um->um_cred),
	    un->un_path);
	if (error)
		return error;

	error = VOP_WHITEOUT(dvp, &cn, CREATE);
	return error;
}

/*
 * union_vn_create: creates and opens a new shadow file
 * on the upper union layer.  this function is similar
 * in spirit to calling vn_open but it avoids calling namei().
 * the problem with calling namei is that a) it locks too many
 * things, and b) it doesn't start at the "right" directory,
 * whereas union_do_lookup is told where to start.
 */
int
union_vn_create(struct vnode **vpp, struct union_node *un, struct lwp *l)
{
	struct vnode *vp;
	kauth_cred_t cred = l->l_cred;
	struct vattr vat;
	struct vattr *vap = &vat;
	int fmode = FFLAGS(O_WRONLY|O_CREAT|O_TRUNC|O_EXCL);
	int error;
	int cmode = UN_FILEMODE & ~l->l_proc->p_cwdi->cwdi_cmask;
	struct componentname cn;

	*vpp = NULLVP;

	vn_lock(un->un_dirvp, LK_EXCLUSIVE | LK_RETRY);

	error = union_do_lookup(un->un_dirvp, &cn, l->l_cred,
	    un->un_path);
	if (error) {
		VOP_UNLOCK(un->un_dirvp);
		return error;
	}

	/*
	 * Good - there was no race to create the file
	 * so go ahead and create it.  The permissions
	 * on the file will be 0666 modified by the
	 * current user's umask.  Access to the file, while
	 * it is unioned, will require access to the top *and*
	 * bottom files.  Access when not unioned will simply
	 * require access to the top-level file.
	 * TODO: confirm choice of access permissions.
	 */
	vattr_null(vap);
	vap->va_type = VREG;
	vap->va_mode = cmode;
	vp = NULL;
	error = VOP_CREATE(un->un_dirvp, &vp, &cn, vap);
	if (error) {
		VOP_UNLOCK(un->un_dirvp);
		return error;
	}

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	VOP_UNLOCK(un->un_dirvp);
	error = VOP_OPEN(vp, fmode, cred);
	if (error) {
		vput(vp);
		return error;
	}

	vp->v_writecount++;
	*vpp = vp;
	return 0;
}

int
union_vn_close(struct vnode *vp, int fmode, kauth_cred_t cred, struct lwp *l)
{

	if (fmode & FWRITE)
		--vp->v_writecount;
	return (VOP_CLOSE(vp, fmode, cred));
}

void
union_removed_upper(struct union_node *un)
{
	struct vnode *vp = UNIONTOV(un);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#if 1
	/*
	 * We do not set the uppervp to NULLVP here, because lowervp
	 * may also be NULLVP, so this routine would end up creating
	 * a bogus union node with no upper or lower VP (that causes
	 * pain in many places that assume at least one VP exists).
	 * Since we've removed this node from the cache hash chains,
	 * it won't be found again.  When all current holders
	 * release it, union_inactive() will vgone() it.
	 */
	union_diruncache(un);
#else
	union_newupper(un, NULLVP);
#endif

	VOP_UNLOCK(vp);

	mutex_enter(&uhash_lock);
	if (un->un_cflags & UN_CACHED) {
		un->un_cflags &= ~UN_CACHED;
		LIST_REMOVE(un, un_cache);
	}
	mutex_exit(&uhash_lock);
}

#if 0
struct vnode *
union_lowervp(struct vnode *vp)
{
	struct union_node *un = VTOUNION(vp);

	if ((un->un_lowervp != NULLVP) &&
	    (vp->v_type == un->un_lowervp->v_type)) {
		if (vget(un->un_lowervp, 0, true /* wait */) == 0)
			return (un->un_lowervp);
	}

	return (NULLVP);
}
#endif

/*
 * determine whether a whiteout is needed
 * during a remove/rmdir operation.
 */
int
union_dowhiteout(struct union_node *un, kauth_cred_t cred)
{
	struct vattr va;

	if (un->un_lowervp != NULLVP)
		return (1);

	if (VOP_GETATTR(un->un_uppervp, &va, cred) == 0 &&
	    (va.va_flags & OPAQUE))
		return (1);

	return (0);
}

static void
union_dircache_r(struct vnode *vp, struct vnode ***vppp, int *cntp)
{
	struct union_node *un;

	if (vp->v_op != union_vnodeop_p) {
		if (vppp) {
			vref(vp);
			*(*vppp)++ = vp;
			if (--(*cntp) == 0)
				panic("union: dircache table too small");
		} else {
			(*cntp)++;
		}

		return;
	}

	un = VTOUNION(vp);
	if (un->un_uppervp != NULLVP)
		union_dircache_r(un->un_uppervp, vppp, cntp);
	if (un->un_lowervp != NULLVP)
		union_dircache_r(un->un_lowervp, vppp, cntp);
}

struct vnode *
union_dircache(struct vnode *vp, struct lwp *l)
{
	int cnt;
	struct vnode *nvp = NULLVP;
	struct vnode **vpp;
	struct vnode **dircache;
	int error;

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	dircache = VTOUNION(vp)->un_dircache;

	nvp = NULLVP;

	if (dircache == 0) {
		cnt = 0;
		union_dircache_r(vp, 0, &cnt);
		cnt++;
		dircache = (struct vnode **)
		    malloc(cnt * sizeof(struct vnode *), M_TEMP, M_WAITOK);
		vpp = dircache;
		union_dircache_r(vp, &vpp, &cnt);
		VTOUNION(vp)->un_dircache = dircache;
		*vpp = NULLVP;
		vpp = dircache + 1;
	} else {
		vpp = dircache;
		do {
			if (*vpp++ == VTOUNION(vp)->un_uppervp)
				break;
		} while (*vpp != NULLVP);
	}

	if (*vpp == NULLVP)
		goto out;

	vref(*vpp);
	error = union_allocvp(&nvp, vp->v_mount, NULLVP, NULLVP, 0,
	    *vpp, NULLVP, 0);
	if (!error) {
		vn_lock(nvp, LK_EXCLUSIVE | LK_RETRY);
		VTOUNION(vp)->un_dircache = 0;
		VTOUNION(nvp)->un_dircache = dircache;
	}

out:
	VOP_UNLOCK(vp);
	return (nvp);
}

void
union_diruncache(struct union_node *un)
{
	struct vnode **vpp;

	KASSERT(VOP_ISLOCKED(UNIONTOV(un)) == LK_EXCLUSIVE);
	if (un->un_dircache != 0) {
		for (vpp = un->un_dircache; *vpp != NULLVP; vpp++)
			vrele(*vpp);
		free(un->un_dircache, M_TEMP);
		un->un_dircache = 0;
	}
}

/*
 * Check whether this directory node may be removed (rmdir): unless the
 * upper layer is opaque, every entry in the lower directory must be
 * covered by a whiteout in the upper layer, i.e. the union directory
 * must appear empty.
 */
int
union_check_rmdir(struct union_node *un, kauth_cred_t cred)
{
	int dirlen, eofflag, error;
	char *dirbuf;
	struct vattr va;
	struct vnode *tvp;
	struct dirent *dp, *edp;
	struct componentname cn;
	struct iovec aiov;
	struct uio auio;

	KASSERT(un->un_uppervp != NULL);

	/* Check upper for being opaque. */
	KASSERT(VOP_ISLOCKED(un->un_uppervp));
	error = VOP_GETATTR(un->un_uppervp, &va, cred);
	if (error || (va.va_flags & OPAQUE))
		return error;

	if (un->un_lowervp == NULL)
		return 0;

	/* Check lower for being empty. */
	vn_lock(un->un_lowervp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(un->un_lowervp, &va, cred);
	if (error) {
		VOP_UNLOCK(un->un_lowervp);
		return error;
	}
	dirlen = va.va_blocksize;
	dirbuf = kmem_alloc(dirlen, KM_SLEEP);
	if (dirbuf == NULL) {
		VOP_UNLOCK(un->un_lowervp);
		return ENOMEM;
	}
	/* error = 0; */
	eofflag = 0;
	auio.uio_offset = 0;
	do {
		aiov.iov_len = dirlen;
		aiov.iov_base = dirbuf;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_resid = aiov.iov_len;
		auio.uio_rw = UIO_READ;
		UIO_SETUP_SYSSPACE(&auio);
		error = VOP_READDIR(un->un_lowervp, &auio, cred, &eofflag,
		    NULL, NULL);
		if (error)
			break;
		edp = (struct dirent *)&dirbuf[dirlen - auio.uio_resid];
		for (dp = (struct dirent *)dirbuf;
		    error == 0 && dp < edp;
		    dp = (struct dirent *)((char *)dp + dp->d_reclen)) {
			if (dp->d_reclen == 0) {
				error = ENOTEMPTY;
				break;
			}
			if (dp->d_type == DT_WHT ||
			    (dp->d_namlen == 1 && dp->d_name[0] == '.') ||
			    (dp->d_namlen == 2 && !memcmp(dp->d_name, "..", 2)))
				continue;
			/* Check for presence in the upper layer. */
			cn.cn_nameiop = LOOKUP;
			cn.cn_flags = ISLASTCN | RDONLY;
			cn.cn_cred = cred;
			cn.cn_nameptr = dp->d_name;
			cn.cn_namelen = dp->d_namlen;
			error = VOP_LOOKUP(un->un_uppervp, &tvp, &cn);
			if (error == ENOENT && (cn.cn_flags & ISWHITEOUT)) {
				error = 0;
				continue;
			}
			if (error == 0)
				vrele(tvp);
			error = ENOTEMPTY;
		}
	} while (error == 0 && !eofflag);
	kmem_free(dirbuf, dirlen);
	VOP_UNLOCK(un->un_lowervp);

	return error;
}

/*
 * This hook is called from vn_readdir() to switch to the lower directory
 * after the upper directory has been read.
 */
int
union_readdirhook(struct vnode **vpp, struct file *fp, struct lwp *l)
{
	struct vnode *vp = *vpp, *lvp;
	struct vattr va;
	int error;

	if (vp->v_op != union_vnodeop_p)
		return (0);

	/*
	 * If the directory is opaque,
	 * then don't show lower entries
	 */
	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &va, fp->f_cred);
	VOP_UNLOCK(vp);
	if (error || (va.va_flags & OPAQUE))
		return error;

	if ((lvp = union_dircache(vp, l)) == NULLVP)
		return (0);

	error = VOP_OPEN(lvp, FREAD, fp->f_cred);
	if (error) {
		vput(lvp);
		return (error);
	}
	VOP_UNLOCK(lvp);
	fp->f_vnode = lvp;
	fp->f_offset = 0;
	error = vn_close(vp, FREAD, fp->f_cred);
	if (error)
		return (error);
	*vpp = lvp;
	return (0);
}
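
/*
 * Wiring sketch (hedged; the mount-time half lives outside this file):
 * the hook above is installed when a union is mounted and cleared
 * again in union_done() above, e.g.
 *
 *	vn_union_readdir_hook = union_readdirhook;
 */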
1234 | |