1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
29 /*-
30 * Copyright (c) 1994 Christopher G. Demetriou
31 * Copyright (c) 1982, 1986, 1989, 1993
32 * The Regents of the University of California. All rights reserved.
33 * (c) UNIX System Laboratories, Inc.
34 * All or some portions of this file are derived from material licensed
35 * to the University of California by American Telephone and Telegraph
36 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
37 * the permission of UNIX System Laboratories, Inc.
38 *
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
41 * are met:
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. All advertising materials mentioning features or use of this software
48 * must display the following acknowledgement:
49 * This product includes software developed by the University of
50 * California, Berkeley and its contributors.
51 * 4. Neither the name of the University nor the names of its contributors
52 * may be used to endorse or promote products derived from this software
53 * without specific prior written permission.
54 *
55 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
56 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
57 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
58 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
59 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
60 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
61 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
62 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
63 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
64 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
65 * SUCH DAMAGE.
66 *
67 * @(#)vfs_bio.c 8.6 (Berkeley) 1/11/94
68 */
69
70 /*
71 * Some references:
72 * Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
73 * Leffler, et al.: The Design and Implementation of the 4.3BSD
 *	UNIX Operating System (Addison-Wesley, 1989)
75 */
76
77 #include <sys/param.h>
78 #include <sys/systm.h>
79 #include <sys/proc_internal.h>
80 #include <sys/buf_internal.h>
81 #include <sys/vnode_internal.h>
82 #include <sys/mount_internal.h>
83 #include <sys/trace.h>
84 #include <kern/kalloc.h>
85 #include <sys/resourcevar.h>
86 #include <miscfs/specfs/specdev.h>
87 #include <sys/ubc.h>
88 #include <sys/kauth.h>
89 #if DIAGNOSTIC
90 #include <kern/assert.h>
91 #endif /* DIAGNOSTIC */
92 #include <kern/task.h>
93 #include <kern/zalloc.h>
94 #include <kern/locks.h>
95 #include <kern/thread.h>
96
97 #include <sys/fslog.h> /* fslog_io_error() */
98 #include <sys/disk.h> /* dk_error_description_t */
99
100 #include <mach/mach_types.h>
101 #include <mach/memory_object_types.h>
102 #include <kern/sched_prim.h> /* thread_block() */
103
104 #include <vm/vm_kern_xnu.h>
105 #include <vm/vm_pageout_xnu.h>
106
107 #include <sys/kdebug.h>
108
109 #include <libkern/OSAtomic.h>
110 #include <libkern/OSDebug.h>
111 #include <sys/ubc_internal.h>
112
113 #include <sys/sdt.h>
114
115 int bcleanbuf(buf_t bp, boolean_t discard);
116 static int brecover_data(buf_t bp);
117 static boolean_t incore(vnode_t vp, daddr64_t blkno);
118 /* timeout is in msecs */
119 static buf_t getnewbuf(int slpflag, int slptimeo, int *queue);
120 static void bremfree_locked(buf_t bp);
121 static void buf_reassign(buf_t bp, vnode_t newvp);
122 static errno_t buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo);
123 static int buf_iterprepare(vnode_t vp, struct buflists *, int flags);
124 static void buf_itercomplete(vnode_t vp, struct buflists *, int flags);
125 static boolean_t buffer_cache_gc(int);
126 static buf_t buf_brelse_shadow(buf_t bp);
127 static void buf_free_meta_store(buf_t bp);
128
129 static buf_t buf_create_shadow_internal(buf_t bp, boolean_t force_copy,
130 uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv);
131
132
133 int bdwrite_internal(buf_t, int);
134
135 extern void disk_conditioner_delay(buf_t, int, int, uint64_t);
136
137 /* zone allocated buffer headers */
138 static void bcleanbuf_thread_init(void);
139 static void bcleanbuf_thread(void);
140
141 static ZONE_DEFINE_TYPE(buf_hdr_zone, "buf headers", struct buf, ZC_NONE);
142 static int buf_hdr_count;
143
144
145 /*
146 * Definitions for the buffer hash lists.
147 */
148 #define BUFHASH(dvp, lbn) \
149 (&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
150 LIST_HEAD(bufhashhdr, buf) * bufhashtbl, invalhash;
151 u_long bufhash;
152
153 static buf_t incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp);
154
155 /* Definitions for the buffer stats. */
156 struct bufstats bufstats;
157
158 /* Number of delayed write buffers */
159 long nbdwrite = 0;
160 int blaundrycnt = 0;
161 static int boot_nbuf_headers = 0;
162
163 static TAILQ_HEAD(delayqueue, buf) delaybufqueue;
164
165 static TAILQ_HEAD(ioqueue, buf) iobufqueue;
166 static TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
167 static int needbuffer;
168 static int need_iobuffer;
169
170 static LCK_GRP_DECLARE(buf_mtx_grp, "buffer cache");
171 static LCK_ATTR_DECLARE(buf_mtx_attr, 0, 0);
172 static LCK_MTX_DECLARE_ATTR(iobuffer_mtxp, &buf_mtx_grp, &buf_mtx_attr);
173 static LCK_MTX_DECLARE_ATTR(buf_mtx, &buf_mtx_grp, &buf_mtx_attr);
174 static LCK_MTX_DECLARE_ATTR(buf_gc_callout, &buf_mtx_grp, &buf_mtx_attr);
175
176 static uint32_t buf_busycount;
177
178 #define FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE 16
179 typedef struct {
180 void (* callout)(int, void *);
181 void *context;
182 } fs_buffer_cache_gc_callout_t;
183
184 fs_buffer_cache_gc_callout_t fs_callouts[FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE] = { {NULL, NULL} };
185
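/*
 * Return a low resolution timestamp (seconds of system uptime), used to
 * age buffers sitting on the free lists.
 */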
186 static __inline__ int
buf_timestamp(void)
188 {
189 struct timeval t;
190 microuptime(&t);
191 return (int)t.tv_sec;
192 }
193
194 /*
195 * Insq/Remq for the buffer free lists.
196 */
197 #define binsheadfree(bp, dp, whichq) do { \
198 TAILQ_INSERT_HEAD(dp, bp, b_freelist); \
199 } while (0)
200
201 #define binstailfree(bp, dp, whichq) do { \
202 TAILQ_INSERT_TAIL(dp, bp, b_freelist); \
203 } while (0)
204
205 #define BHASHENTCHECK(bp) \
206 if ((bp)->b_hash.le_prev != (struct buf **)0xdeadbeef) \
207 panic("%p: b_hash.le_prev is not deadbeef", (bp));
208
209 #define BLISTNONE(bp) \
210 (bp)->b_hash.le_next = (struct buf *)0; \
211 (bp)->b_hash.le_prev = (struct buf **)0xdeadbeef;
212
213 /*
214 * Insq/Remq for the vnode usage lists.
215 */
216 #define bufinsvn(bp, dp) LIST_INSERT_HEAD(dp, bp, b_vnbufs)
217 #define bufremvn(bp) { \
218 LIST_REMOVE(bp, b_vnbufs); \
219 (bp)->b_vnbufs.le_next = NOLIST; \
220 }
221
222 /*
223 * Time in seconds before a buffer on a list is
224 * considered as a stale buffer
225 */
226 #define LRU_IS_STALE 120 /* default value for the LRU */
227 #define AGE_IS_STALE 60 /* default value for the AGE */
228 #define META_IS_STALE 180 /* default value for the BQ_META */
229
230 int lru_is_stale = LRU_IS_STALE;
231 int age_is_stale = AGE_IS_STALE;
232 int meta_is_stale = META_IS_STALE;
233
234 #define MAXLAUNDRY 10
235
236 /* LIST_INSERT_HEAD() with assertions */
237 static __inline__ void
blistenterhead(struct bufhashhdr * head, buf_t bp)
239 {
240 if ((bp->b_hash.le_next = (head)->lh_first) != NULL) {
241 (head)->lh_first->b_hash.le_prev = &(bp)->b_hash.le_next;
242 }
243 (head)->lh_first = bp;
244 bp->b_hash.le_prev = &(head)->lh_first;
245 if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) {
246 panic("blistenterhead: le_prev is deadbeef");
247 }
248 }
249
250 static __inline__ void
binshash(buf_t bp, struct bufhashhdr *dp)
252 {
253 #if DIAGNOSTIC
254 buf_t nbp;
255 #endif /* DIAGNOSTIC */
256
257 BHASHENTCHECK(bp);
258
259 #if DIAGNOSTIC
260 nbp = dp->lh_first;
261 for (; nbp != NULL; nbp = nbp->b_hash.le_next) {
262 if (nbp == bp) {
263 panic("buf already in hashlist");
264 }
265 }
266 #endif /* DIAGNOSTIC */
267
268 blistenterhead(dp, bp);
269 }
270
271 static __inline__ void
bremhash(buf_t bp)
273 {
274 if (bp->b_hash.le_prev == (struct buf **)0xdeadbeef) {
275 panic("bremhash le_prev is deadbeef");
276 }
277 if (bp->b_hash.le_next == bp) {
278 panic("bremhash: next points to self");
279 }
280
281 if (bp->b_hash.le_next != NULL) {
282 bp->b_hash.le_next->b_hash.le_prev = bp->b_hash.le_prev;
283 }
284 *bp->b_hash.le_prev = (bp)->b_hash.le_next;
285 }
286
287 /*
288 * buf_mtx held.
289 */
290 static __inline__ void
bmovelaundry(buf_t bp)
292 {
293 bp->b_whichq = BQ_LAUNDRY;
294 bp->b_timestamp = buf_timestamp();
295 binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
296 blaundrycnt++;
297 }
298
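/*
 * Drop any read/write credentials still referenced by the buffer.
 */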
299 static __inline__ void
buf_release_credentials(buf_t bp)
301 {
302 if (IS_VALID_CRED(bp->b_rcred)) {
303 kauth_cred_unref(&bp->b_rcred);
304 }
305 if (IS_VALID_CRED(bp->b_wcred)) {
306 kauth_cred_unref(&bp->b_wcred);
307 }
308 }
309
310
311 int
buf_valid(buf_t bp)
313 {
314 if ((bp->b_flags & (B_DONE | B_DELWRI))) {
315 return 1;
316 }
317 return 0;
318 }
319
320 int
buf_fromcache(buf_t bp)
322 {
323 if ((bp->b_flags & B_CACHE)) {
324 return 1;
325 }
326 return 0;
327 }
328
329 void
buf_markinvalid(buf_t bp)
331 {
332 SET(bp->b_flags, B_INVAL);
333 }
334
335 void
buf_markdelayed(buf_t bp)
337 {
338 if (!ISSET(bp->b_flags, B_DELWRI)) {
339 SET(bp->b_flags, B_DELWRI);
340
341 OSAddAtomicLong(1, &nbdwrite);
342 buf_reassign(bp, bp->b_vp);
343 }
344 SET(bp->b_flags, B_DONE);
345 }
346
347 void
buf_markclean(buf_t bp)
349 {
350 if (ISSET(bp->b_flags, B_DELWRI)) {
351 CLR(bp->b_flags, B_DELWRI);
352
353 OSAddAtomicLong(-1, &nbdwrite);
354 buf_reassign(bp, bp->b_vp);
355 }
356 }
357
358 void
buf_markeintr(buf_t bp)
360 {
361 SET(bp->b_flags, B_EINTR);
362 }
363
364
365 void
buf_markaged(buf_t bp)
367 {
368 SET(bp->b_flags, B_AGE);
369 }
370
371 int
buf_fua(buf_t bp)
373 {
374 if ((bp->b_flags & B_FUA) == B_FUA) {
375 return 1;
376 }
377 return 0;
378 }
379
380 void
buf_markfua(buf_t bp)
382 {
383 SET(bp->b_flags, B_FUA);
384 }
385
386 #if CONFIG_PROTECT
387 cpx_t
bufattr_cpx(bufattr_t bap)
389 {
390 return bap->ba_cpx;
391 }
392
393 void
bufattr_setcpx(bufattr_t bap, cpx_t cpx)
395 {
396 bap->ba_cpx = cpx;
397 }
398
399 void
buf_setcpoff(buf_t bp, uint64_t foffset)
401 {
402 bp->b_attr.ba_cp_file_off = foffset;
403 }
404
405 uint64_t
bufattr_cpoff(bufattr_t bap)
407 {
408 return bap->ba_cp_file_off;
409 }
410
411 void
bufattr_setcpoff(bufattr_t bap, uint64_t foffset)
413 {
414 bap->ba_cp_file_off = foffset;
415 }
416
#else /* !CONFIG_PROTECT */
418
419 uint64_t
bufattr_cpoff(bufattr_t bap __unused)
421 {
422 return 0;
423 }
424
425 void
bufattr_setcpoff(__unused bufattr_t bap, __unused uint64_t foffset)
427 {
428 return;
429 }
430
431 struct cpx *
bufattr_cpx(__unused bufattr_t bap)
433 {
434 return NULL;
435 }
436
437 void
bufattr_setcpx(__unused bufattr_t bap, __unused struct cpx *cpx)
439 {
440 }
441
442 #endif /* !CONFIG_PROTECT */
443
444 bufattr_t
bufattr_alloc(void)
446 {
447 return kalloc_type(struct bufattr, Z_WAITOK | Z_ZERO);
448 }
449
450 void
bufattr_free(bufattr_t bap)
452 {
453 kfree_type(struct bufattr, bap);
454 }
455
456 bufattr_t
bufattr_dup(bufattr_t bap)
458 {
459 bufattr_t new_bufattr;
460 new_bufattr = kalloc_type(struct bufattr, Z_WAITOK | Z_NOFAIL);
461
462 /* Copy the provided one into the new copy */
463 memcpy(new_bufattr, bap, sizeof(struct bufattr));
464 return new_bufattr;
465 }
466
467 int
bufattr_rawencrypted(bufattr_t bap)
469 {
470 if ((bap->ba_flags & BA_RAW_ENCRYPTED_IO)) {
471 return 1;
472 }
473 return 0;
474 }
475
476 int
bufattr_throttled(bufattr_t bap)
478 {
479 return GET_BUFATTR_IO_TIER(bap);
480 }
481
482 int
bufattr_passive(bufattr_t bap)
484 {
485 if ((bap->ba_flags & BA_PASSIVE)) {
486 return 1;
487 }
488 return 0;
489 }
490
491 int
bufattr_nocache(bufattr_t bap)
493 {
494 if ((bap->ba_flags & BA_NOCACHE)) {
495 return 1;
496 }
497 return 0;
498 }
499
500 int
bufattr_meta(bufattr_t bap)
502 {
503 if ((bap->ba_flags & BA_META)) {
504 return 1;
505 }
506 return 0;
507 }
508
509 void
bufattr_markmeta(bufattr_t bap)
511 {
512 SET(bap->ba_flags, BA_META);
513 }
514
515 int
bufattr_delayidlesleep(bufattr_t bap)
517 {
518 if ((bap->ba_flags & BA_DELAYIDLESLEEP)) {
519 return 1;
520 }
521 return 0;
522 }
523
524 bufattr_t
buf_attr(buf_t bp)
526 {
527 return &bp->b_attr;
528 }
529
530 void
buf_markstatic(buf_t bp __unused)
532 {
533 SET(bp->b_flags, B_STATICCONTENT);
534 }
535
536 int
buf_static(buf_t bp)
538 {
539 if ((bp->b_flags & B_STATICCONTENT)) {
540 return 1;
541 }
542 return 0;
543 }
544
545 void
bufattr_markgreedymode(bufattr_t bap)
547 {
548 SET(bap->ba_flags, BA_GREEDY_MODE);
549 }
550
551 int
bufattr_greedymode(bufattr_t bap)
553 {
554 if ((bap->ba_flags & BA_GREEDY_MODE)) {
555 return 1;
556 }
557 return 0;
558 }
559
560 void
bufattr_markisochronous(bufattr_t bap)
562 {
563 SET(bap->ba_flags, BA_ISOCHRONOUS);
564 }
565
566 int
bufattr_isochronous(bufattr_t bap)
568 {
569 if ((bap->ba_flags & BA_ISOCHRONOUS)) {
570 return 1;
571 }
572 return 0;
573 }
574
575 void
bufattr_markquickcomplete(bufattr_t bap)
577 {
578 SET(bap->ba_flags, BA_QUICK_COMPLETE);
579 }
580
581 int
bufattr_quickcomplete(bufattr_t bap)
583 {
584 if ((bap->ba_flags & BA_QUICK_COMPLETE)) {
585 return 1;
586 }
587 return 0;
588 }
589
590 void
bufattr_markioscheduled(bufattr_t bap)
592 {
593 SET(bap->ba_flags, BA_IO_SCHEDULED);
594 }
595
596
597 int
bufattr_ioscheduled(bufattr_t bap)
599 {
600 if ((bap->ba_flags & BA_IO_SCHEDULED)) {
601 return 1;
602 }
603 return 0;
604 }
605
606 void
bufattr_markexpeditedmeta(bufattr_t bap)
608 {
609 SET(bap->ba_flags, BA_EXPEDITED_META_IO);
610 }
611
612 int
bufattr_expeditedmeta(bufattr_t bap)
614 {
615 if ((bap->ba_flags & BA_EXPEDITED_META_IO)) {
616 return 1;
617 }
618 return 0;
619 }
620
621 int
bufattr_willverify(bufattr_t bap)
623 {
624 if ((bap->ba_flags & BA_WILL_VERIFY)) {
625 return 1;
626 }
627 return 0;
628 }
629
630 errno_t
buf_error(buf_t bp)
632 {
633 return bp->b_error;
634 }
635
636 void
buf_seterror(buf_t bp, errno_t error)
638 {
639 if ((bp->b_error = error)) {
640 SET(bp->b_flags, B_ERROR);
641 } else {
642 CLR(bp->b_flags, B_ERROR);
643 }
644 }
645
646 void
buf_setflags(buf_t bp, int32_t flags)
648 {
649 SET(bp->b_flags, (flags & BUF_X_WRFLAGS));
650 }
651
652 void
buf_clearflags(buf_t bp, int32_t flags)
654 {
655 CLR(bp->b_flags, (flags & BUF_X_WRFLAGS));
656 }
657
658 int32_t
buf_flags(buf_t bp)
660 {
661 return bp->b_flags & BUF_X_RDFLAGS;
662 }
663
664 void
buf_reset(buf_t bp, int32_t io_flags)
666 {
667 CLR(bp->b_flags, (B_READ | B_WRITE | B_ERROR | B_DONE | B_INVAL | B_ASYNC | B_NOCACHE | B_FUA));
668 SET(bp->b_flags, (io_flags & (B_ASYNC | B_READ | B_WRITE | B_NOCACHE)));
669
670 bp->b_error = 0;
671 }
672
673 uint32_t
buf_count(buf_t bp)
675 {
676 return bp->b_bcount;
677 }
678
679 void
buf_setcount(buf_t bp, uint32_t bcount)
681 {
682 bp->b_bcount = bcount;
683 }
684
685 uint32_t
buf_size(buf_t bp)
687 {
688 return bp->b_bufsize;
689 }
690
691 void
buf_setsize(buf_t bp, uint32_t bufsize)
693 {
694 bp->b_bufsize = bufsize;
695 }
696
697 uint32_t
buf_resid(buf_t bp)
699 {
700 return bp->b_resid;
701 }
702
703 void
buf_setresid(buf_t bp, uint32_t resid)
705 {
706 bp->b_resid = resid;
707 }
708
709 uint32_t
buf_dirtyoff(buf_t bp)
711 {
712 return bp->b_dirtyoff;
713 }
714
715 uint32_t
buf_dirtyend(buf_t bp)
717 {
718 return bp->b_dirtyend;
719 }
720
721 void
buf_setdirtyoff(buf_t bp, uint32_t dirtyoff)
723 {
724 bp->b_dirtyoff = dirtyoff;
725 }
726
727 void
buf_setdirtyend(buf_t bp, uint32_t dirtyend)
729 {
730 bp->b_dirtyend = dirtyend;
731 }
732
733 uintptr_t
buf_dataptr(buf_t bp)
735 {
736 return bp->b_datap;
737 }
738
739 void
buf_setdataptr(buf_t bp, uintptr_t data)
741 {
742 bp->b_datap = data;
743 }
744
745 vnode_t
buf_vnode(buf_t bp)
747 {
748 return bp->b_vp;
749 }
750
751 void
buf_setvnode(buf_t bp, vnode_t vp)
753 {
754 bp->b_vp = vp;
755 }
756
757 vnode_t
buf_vnop_vnode(buf_t bp)
759 {
760 return bp->b_vnop_vp ? bp->b_vnop_vp : bp->b_vp;
761 }
762
763 void *
buf_callback(buf_t bp)
765 {
766 if (!(bp->b_flags & B_CALL)) {
767 return (void *) NULL;
768 }
769
770 return (void *)bp->b_iodone;
771 }
772
773
774 errno_t
buf_setcallback(buf_t bp, void (*callback)(buf_t, void *), void *transaction)
776 {
777 assert(!ISSET(bp->b_flags, B_FILTER) && ISSET(bp->b_lflags, BL_BUSY));
778
779 if (callback) {
780 bp->b_flags |= (B_CALL | B_ASYNC);
781 } else {
782 bp->b_flags &= ~B_CALL;
783 }
784 bp->b_transaction = transaction;
785 bp->b_iodone = callback;
786
787 return 0;
788 }
789
790 errno_t
buf_setupl(buf_t bp, upl_t upl, uint32_t offset)
792 {
793 if (!(bp->b_lflags & BL_IOBUF)) {
794 return EINVAL;
795 }
796
797 if (upl) {
798 bp->b_flags |= B_CLUSTER;
799 } else {
800 bp->b_flags &= ~B_CLUSTER;
801 }
802 bp->b_upl = upl;
803 bp->b_uploffset = offset;
804
805 return 0;
806 }
807
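/*
 * Clone a sub-range of an existing buffer into a new I/O buffer.
 * The requested range must lie within the original b_bcount and, for
 * UPL backed (B_CLUSTER) buffers, must respect page alignment; NULL is
 * returned if the range is invalid.
 */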
808 buf_t
buf_clone(buf_t bp, int io_offset, int io_size, void (*iodone)(buf_t, void *), void *arg)
810 {
811 buf_t io_bp;
812 int add1, add2;
813
814 if (io_offset < 0 || io_size < 0) {
815 return NULL;
816 }
817
818 if ((unsigned)(io_offset + io_size) > (unsigned)bp->b_bcount) {
819 return NULL;
820 }
821
822 if (bp->b_flags & B_CLUSTER) {
823 if (io_offset && ((bp->b_uploffset + io_offset) & PAGE_MASK)) {
824 return NULL;
825 }
826
827 if (os_add_overflow(io_offset, io_size, &add1) || os_add_overflow(add1, bp->b_uploffset, &add2)) {
828 return NULL;
829 }
830 if ((add2 & PAGE_MASK) && ((uint32_t)add1 < (uint32_t)bp->b_bcount)) {
831 return NULL;
832 }
833 }
834 io_bp = alloc_io_buf(bp->b_vp, 0);
835
836 io_bp->b_flags = bp->b_flags & (B_COMMIT_UPL | B_META | B_PAGEIO | B_CLUSTER | B_PHYS | B_RAW | B_ASYNC | B_READ | B_FUA);
837
838 if (iodone) {
839 io_bp->b_transaction = arg;
840 io_bp->b_iodone = iodone;
841 io_bp->b_flags |= B_CALL;
842 }
843 if (bp->b_flags & B_CLUSTER) {
844 io_bp->b_upl = bp->b_upl;
845 io_bp->b_uploffset = bp->b_uploffset + io_offset;
846 } else {
847 io_bp->b_datap = (uintptr_t)(((char *)bp->b_datap) + io_offset);
848 }
849 io_bp->b_bcount = io_size;
850
851 return io_bp;
852 }
853
854
855 int
buf_shadow(buf_t bp)
857 {
858 if (bp->b_lflags & BL_SHADOW) {
859 return 1;
860 }
861 return 0;
862 }
863
864
865 buf_t
buf_create_shadow_priv(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg)
867 {
868 return buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 1);
869 }
870
871 buf_t
buf_create_shadow(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg)
873 {
874 return buf_create_shadow_internal(bp, force_copy, external_storage, iodone, arg, 0);
875 }
876
877
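/*
 * Common implementation for buf_create_shadow() and buf_create_shadow_priv():
 * create a shadow buf_t for a B_META buffer that either shares the original's
 * data store, uses caller supplied external storage, or (force_copy) copies
 * the data into a private allocation.  Returns NULL for non-meta or I/O bufs.
 */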
878 static buf_t
buf_create_shadow_internal(buf_t bp, boolean_t force_copy, uintptr_t external_storage, void (*iodone)(buf_t, void *), void *arg, int priv)
880 {
881 buf_t io_bp;
882
883 KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_START, bp, 0, 0, 0, 0);
884
885 if (!(bp->b_flags & B_META) || (bp->b_lflags & BL_IOBUF)) {
886 KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, 0, 0, 0, 0);
887 return NULL;
888 }
889 #ifdef BUF_MAKE_PRIVATE
890 if (bp->b_shadow_ref && bp->b_data_ref == 0 && external_storage == 0) {
891 panic("buf_create_shadow: %p is in the private state (%d, %d)", bp, bp->b_shadow_ref, bp->b_data_ref);
892 }
893 #endif
894 io_bp = alloc_io_buf(bp->b_vp, priv);
895
896 io_bp->b_flags = bp->b_flags & (B_META | B_ZALLOC | B_ASYNC | B_READ | B_FUA);
897 io_bp->b_blkno = bp->b_blkno;
898 io_bp->b_lblkno = bp->b_lblkno;
899 io_bp->b_lblksize = bp->b_lblksize;
900
901 if (iodone) {
902 io_bp->b_transaction = arg;
903 io_bp->b_iodone = iodone;
904 io_bp->b_flags |= B_CALL;
905 }
906 if (force_copy == FALSE) {
907 io_bp->b_bcount = bp->b_bcount;
908 io_bp->b_bufsize = bp->b_bufsize;
909
910 if (external_storage) {
911 io_bp->b_datap = external_storage;
912 #ifdef BUF_MAKE_PRIVATE
913 io_bp->b_data_store = NULL;
914 #endif
915 } else {
916 io_bp->b_datap = bp->b_datap;
917 #ifdef BUF_MAKE_PRIVATE
918 io_bp->b_data_store = bp;
919 #endif
920 }
921 *(buf_t *)(&io_bp->b_orig) = bp;
922
923 lck_mtx_lock_spin(&buf_mtx);
924
925 io_bp->b_lflags |= BL_SHADOW;
926 io_bp->b_shadow = bp->b_shadow;
927 bp->b_shadow = io_bp;
928 bp->b_shadow_ref++;
929
930 #ifdef BUF_MAKE_PRIVATE
931 if (external_storage) {
932 io_bp->b_lflags |= BL_EXTERNAL;
933 } else {
934 bp->b_data_ref++;
935 }
936 #endif
937 lck_mtx_unlock(&buf_mtx);
938 } else {
939 if (external_storage) {
940 #ifdef BUF_MAKE_PRIVATE
941 io_bp->b_lflags |= BL_EXTERNAL;
942 #endif
943 io_bp->b_bcount = bp->b_bcount;
944 io_bp->b_bufsize = bp->b_bufsize;
945 io_bp->b_datap = external_storage;
946 } else {
947 allocbuf(io_bp, bp->b_bcount);
948
949 io_bp->b_lflags |= BL_IOBUF_ALLOC;
950 }
951 bcopy((caddr_t)bp->b_datap, (caddr_t)io_bp->b_datap, bp->b_bcount);
952
953 #ifdef BUF_MAKE_PRIVATE
954 io_bp->b_data_store = NULL;
955 #endif
956 }
957 KERNEL_DEBUG(0xbbbbc000 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, io_bp, 0);
958
959 return io_bp;
960 }
961
962
963 #ifdef BUF_MAKE_PRIVATE
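/*
 * Give 'bp' its own private copy of its meta data and hand the shared data
 * store off to one of its non-external shadow buffers.  Returns EINVAL if
 * 'bp' is itself a shadow or has no suitable shadow to take ownership.
 */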
964 errno_t
buf_make_private(buf_t bp)
966 {
967 buf_t ds_bp;
968 buf_t t_bp;
969 struct buf my_buf;
970
971 KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_START, bp, bp->b_shadow_ref, 0, 0, 0);
972
973 if (bp->b_shadow_ref == 0 || bp->b_data_ref == 0 || ISSET(bp->b_lflags, BL_SHADOW)) {
974 KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0);
975 return EINVAL;
976 }
977 my_buf.b_flags = B_META;
978 my_buf.b_datap = (uintptr_t)NULL;
979 allocbuf(&my_buf, bp->b_bcount);
980
981 bcopy((caddr_t)bp->b_datap, (caddr_t)my_buf.b_datap, bp->b_bcount);
982
983 lck_mtx_lock_spin(&buf_mtx);
984
985 for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) {
986 if (!ISSET(bp->b_lflags, BL_EXTERNAL)) {
987 break;
988 }
989 }
990 ds_bp = t_bp;
991
992 if (ds_bp == NULL && bp->b_data_ref) {
993 panic("buf_make_private: b_data_ref != 0 && ds_bp == NULL");
994 }
995
996 if (ds_bp && (bp->b_data_ref == 0 || bp->b_shadow_ref == 0)) {
997 panic("buf_make_private: ref_count == 0 && ds_bp != NULL");
998 }
999
1000 if (ds_bp == NULL) {
1001 lck_mtx_unlock(&buf_mtx);
1002
1003 buf_free_meta_store(&my_buf);
1004
1005 KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, EINVAL, 0);
1006 return EINVAL;
1007 }
1008 for (t_bp = bp->b_shadow; t_bp; t_bp = t_bp->b_shadow) {
1009 if (!ISSET(t_bp->b_lflags, BL_EXTERNAL)) {
1010 t_bp->b_data_store = ds_bp;
1011 }
1012 }
1013 ds_bp->b_data_ref = bp->b_data_ref;
1014
1015 bp->b_data_ref = 0;
1016 bp->b_datap = my_buf.b_datap;
1017
1018 lck_mtx_unlock(&buf_mtx);
1019
1020 KERNEL_DEBUG(0xbbbbc004 | DBG_FUNC_END, bp, bp->b_shadow_ref, 0, 0, 0);
1021 return 0;
1022 }
1023 #endif
1024
1025
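/*
 * Install an I/O completion filter on a busy buffer, optionally returning
 * the previously installed iodone handler and transaction to the caller.
 */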
1026 void
buf_setfilter(buf_t bp, void (*filter)(buf_t, void *), void *transaction,
    void (**old_iodone)(buf_t, void *), void **old_transaction)
1029 {
1030 assert(ISSET(bp->b_lflags, BL_BUSY));
1031
1032 if (old_iodone) {
1033 *old_iodone = bp->b_iodone;
1034 }
1035 if (old_transaction) {
1036 *old_transaction = bp->b_transaction;
1037 }
1038
1039 bp->b_transaction = transaction;
1040 bp->b_iodone = filter;
1041 if (filter) {
1042 bp->b_flags |= B_FILTER;
1043 } else {
1044 bp->b_flags &= ~B_FILTER;
1045 }
1046 }
1047
1048
1049 daddr64_t
buf_blkno(buf_t bp)
1051 {
1052 return bp->b_blkno;
1053 }
1054
1055 daddr64_t
buf_lblkno(buf_t bp)
1057 {
1058 return bp->b_lblkno;
1059 }
1060
1061 uint32_t
buf_lblksize(buf_t bp)
1063 {
1064 if (bp->b_flags & B_CLUSTER) {
1065 return CLUSTER_IO_BLOCK_SIZE;
1066 } else {
1067 return (uint32_t)(bp->b_lblksize);
1068 }
1069 }
1070
1071 void
buf_setblkno(buf_t bp, daddr64_t blkno)
1073 {
1074 bp->b_blkno = blkno;
1075 }
1076
1077 void
buf_setlblkno(buf_t bp, daddr64_t lblkno)
1079 {
1080 bp->b_lblkno = lblkno;
1081 }
1082
1083 void
buf_setlblksize(buf_t bp, uint32_t lblksize)
1085 {
1086 if (!(bp->b_flags & B_CLUSTER)) {
1087 bp->b_lblksize = lblksize;
1088 }
1089 }
1090
1091 dev_t
buf_device(buf_t bp)
1093 {
1094 return bp->b_dev;
1095 }
1096
1097 errno_t
buf_setdevice(buf_t bp, vnode_t vp)
1099 {
1100 if ((vp->v_type != VBLK) && (vp->v_type != VCHR)) {
1101 return EINVAL;
1102 }
1103 bp->b_dev = vp->v_rdev;
1104
1105 return 0;
1106 }
1107
1108
1109 void *
buf_drvdata(buf_t bp)
1111 {
1112 return bp->b_drvdata;
1113 }
1114
1115 void
buf_setdrvdata(buf_t bp, void *drvdata)
1117 {
1118 bp->b_drvdata = drvdata;
1119 }
1120
1121 void *
buf_fsprivate(buf_t bp)
1123 {
1124 return bp->b_fsprivate;
1125 }
1126
1127 void
buf_setfsprivate(buf_t bp, void *fsprivate)
1129 {
1130 bp->b_fsprivate = fsprivate;
1131 }
1132
1133 kauth_cred_t
buf_rcred(buf_t bp)
1135 {
1136 return bp->b_rcred;
1137 }
1138
1139 kauth_cred_t
buf_wcred(buf_t bp)
1141 {
1142 return bp->b_wcred;
1143 }
1144
1145 void *
buf_upl(buf_t bp)
1147 {
1148 return bp->b_upl;
1149 }
1150
1151 uint32_t
buf_uploffset(buf_t bp)
1153 {
1154 return (uint32_t)(bp->b_uploffset);
1155 }
1156
1157 proc_t
buf_proc(buf_t bp)
1159 {
1160 return bp->b_proc;
1161 }
1162
1163
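/*
 * Map the buffer's backing store into the kernel address space and return
 * the mapped address through 'io_addr'.  Non B_CLUSTER buffers are already
 * addressable via b_datap; B_CLUSTER buffers are mapped through their UPL.
 */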
1164 static errno_t
buf_map_range_internal(buf_t bp, caddr_t *io_addr, boolean_t legacymode,
    vm_prot_t prot)
1167 {
1168 buf_t real_bp;
1169 vm_offset_t vaddr;
1170 kern_return_t kret;
1171
1172 if (!(bp->b_flags & B_CLUSTER)) {
1173 *io_addr = (caddr_t)bp->b_datap;
1174 return 0;
1175 }
1176 real_bp = (buf_t)(bp->b_real_bp);
1177
1178 if (real_bp && real_bp->b_datap) {
1179 /*
 * b_real_bp is only valid if B_CLUSTER is SET;
 * if it's non-zero, then someone did a cluster_bp call.
 * If the backing physical pages were already mapped
 * in before the call to cluster_bp (non-zero b_datap),
 * then we just use that mapping.
1185 */
1186 *io_addr = (caddr_t)real_bp->b_datap;
1187 return 0;
1188 }
1189
1190 if (legacymode) {
1191 kret = ubc_upl_map(bp->b_upl, &vaddr); /* Map it in */
1192 if (kret == KERN_SUCCESS) {
1193 vaddr += bp->b_uploffset;
1194 }
1195 } else {
1196 upl_t upl = bp->b_upl;
1197 upl_set_map_exclusive(upl);
1198 kret = ubc_upl_map_range(upl, bp->b_uploffset, bp->b_bcount, prot, &vaddr); /* Map it in */
1199 if (kret != KERN_SUCCESS) {
1200 upl_clear_map_exclusive(upl);
1201 }
1202 }
1203
1204 if (kret != KERN_SUCCESS) {
1205 *io_addr = NULL;
1206
1207 return ENOMEM;
1208 }
1209
1210 *io_addr = (caddr_t)vaddr;
1211
1212 return 0;
1213 }
1214
1215 errno_t
buf_map_range(buf_t bp, caddr_t *io_addr)
1217 {
1218 return buf_map_range_internal(bp, io_addr, false, VM_PROT_DEFAULT);
1219 }
1220
1221 errno_t
buf_map_range_with_prot(buf_t bp, caddr_t *io_addr, vm_prot_t prot)
1223 {
1224 /* Only VM_PROT_READ and/or VM_PROT_WRITE is allowed. */
1225 prot &= (VM_PROT_READ | VM_PROT_WRITE);
1226 if (prot == VM_PROT_NONE) {
1227 *io_addr = NULL;
1228 return EINVAL;
1229 }
1230
1231 return buf_map_range_internal(bp, io_addr, false, prot);
1232 }
1233
1234 errno_t
buf_map(buf_t bp, caddr_t *io_addr)
1236 {
1237 return buf_map_range_internal(bp, io_addr, true, VM_PROT_DEFAULT);
1238 }
1239
1240 static errno_t
buf_unmap_range_internal(buf_t bp, boolean_t legacymode)
1242 {
1243 buf_t real_bp;
1244 kern_return_t kret;
1245
1246 if (!(bp->b_flags & B_CLUSTER)) {
1247 return 0;
1248 }
1249 /*
1250 * see buf_map for the explanation
1251 */
1252 real_bp = (buf_t)(bp->b_real_bp);
1253
1254 if (real_bp && real_bp->b_datap) {
1255 return 0;
1256 }
1257
1258 if ((bp->b_lflags & BL_IOBUF) &&
1259 ((bp->b_flags & (B_PAGEIO | B_READ)) != (B_PAGEIO | B_READ))) {
1260 /*
1261 * ignore pageins... the 'right' thing will
1262 * happen due to the way we handle speculative
1263 * clusters...
1264 *
1265 * when we commit these pages, we'll hit
1266 * it with UPL_COMMIT_INACTIVE which
1267 * will clear the reference bit that got
1268 * turned on when we touched the mapping
1269 */
1270 bp->b_flags |= B_AGE;
1271 }
1272
1273 if (legacymode) {
1274 kret = ubc_upl_unmap(bp->b_upl);
1275 } else {
1276 kret = ubc_upl_unmap_range(bp->b_upl, bp->b_uploffset, bp->b_bcount);
1277 upl_clear_map_exclusive(bp->b_upl);
1278 }
1279
1280 if (kret != KERN_SUCCESS) {
1281 return EINVAL;
1282 }
1283 return 0;
1284 }
1285
1286 errno_t
buf_unmap_range(buf_t bp)
1288 {
1289 return buf_unmap_range_internal(bp, false);
1290 }
1291
1292 errno_t
buf_unmap(buf_t bp)
1294 {
1295 return buf_unmap_range_internal(bp, true);
1296 }
1297
1298
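/*
 * Zero the buffer's data (mapping it in if needed) and clear the
 * residual count.
 */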
1299 void
buf_clear(buf_t bp)
1301 {
1302 caddr_t baddr;
1303
1304 if (buf_map(bp, &baddr) == 0) {
1305 bzero(baddr, bp->b_bcount);
1306 buf_unmap(bp);
1307 }
1308 bp->b_resid = 0;
1309 }
1310
1311 /*
1312 * Read or write a buffer that is not contiguous on disk.
1313 * buffer is marked done/error at the conclusion
1314 */
1315 static int
buf_strategy_fragmented(vnode_t devvp, buf_t bp, off_t f_offset, size_t contig_bytes)
1317 {
1318 vnode_t vp = buf_vnode(bp);
1319 buf_t io_bp; /* For reading or writing a single block */
1320 int io_direction;
1321 int io_resid;
1322 size_t io_contig_bytes;
1323 daddr64_t io_blkno;
1324 int error = 0;
1325 int bmap_flags;
1326
1327 /*
1328 * save our starting point... the bp was already mapped
1329 * in buf_strategy before we got called
1330 * no sense doing it again.
1331 */
1332 io_blkno = bp->b_blkno;
1333 /*
1334 * Make sure we redo this mapping for the next I/O
1335 * i.e. this can never be a 'permanent' mapping
1336 */
1337 bp->b_blkno = bp->b_lblkno;
1338
1339 /*
1340 * Get an io buffer to do the deblocking
1341 */
1342 io_bp = alloc_io_buf(devvp, 0);
1343
1344 io_bp->b_lblkno = bp->b_lblkno;
1345 io_bp->b_lblksize = bp->b_lblksize;
1346 io_bp->b_datap = bp->b_datap;
1347 io_resid = bp->b_bcount;
1348 io_direction = bp->b_flags & B_READ;
1349 io_contig_bytes = contig_bytes;
1350
1351 if (bp->b_flags & B_READ) {
1352 bmap_flags = VNODE_READ;
1353 } else {
1354 bmap_flags = VNODE_WRITE;
1355 }
1356
1357 for (;;) {
1358 if (io_blkno == -1) {
1359 /*
 * this is unexpected, but we'll allow for it
1361 */
1362 bzero((caddr_t)io_bp->b_datap, (int)io_contig_bytes);
1363 } else {
1364 io_bp->b_bcount = (uint32_t)io_contig_bytes;
1365 io_bp->b_bufsize = (uint32_t)io_contig_bytes;
1366 io_bp->b_resid = (uint32_t)io_contig_bytes;
1367 io_bp->b_blkno = io_blkno;
1368
1369 buf_reset(io_bp, io_direction);
1370
1371 /*
1372 * Call the device to do the I/O and wait for it. Make sure the appropriate party is charged for write
1373 */
1374
1375 if (!ISSET(bp->b_flags, B_READ)) {
1376 OSAddAtomic(1, &devvp->v_numoutput);
1377 }
1378
1379 if ((error = VNOP_STRATEGY(io_bp))) {
1380 break;
1381 }
1382 if ((error = (int)buf_biowait(io_bp))) {
1383 break;
1384 }
1385 if (io_bp->b_resid) {
1386 io_resid -= (io_contig_bytes - io_bp->b_resid);
1387 break;
1388 }
1389 }
1390 if ((io_resid -= io_contig_bytes) == 0) {
1391 break;
1392 }
1393 f_offset += io_contig_bytes;
1394 io_bp->b_datap += io_contig_bytes;
1395
1396 /*
1397 * Map the current position to a physical block number
1398 */
1399 if ((error = VNOP_BLOCKMAP(vp, f_offset, io_resid, &io_blkno, &io_contig_bytes, NULL, bmap_flags, NULL))) {
1400 break;
1401 }
1402 }
1403 buf_free(io_bp);
1404
1405 if (error) {
1406 buf_seterror(bp, error);
1407 }
1408 bp->b_resid = io_resid;
1409 /*
1410 * This I/O is now complete
1411 */
1412 buf_biodone(bp);
1413
1414 return error;
1415 }
1416
1417
1418 /*
1419 * struct vnop_strategy_args {
1420 * struct buf *a_bp;
1421 * } *ap;
1422 */
1423 errno_t
buf_strategy(vnode_t devvp, void *ap)
1425 {
1426 buf_t bp = ((struct vnop_strategy_args *)ap)->a_bp;
1427 vnode_t vp = bp->b_vp;
1428 int bmap_flags;
1429 errno_t error;
1430 #if CONFIG_DTRACE
1431 int dtrace_io_start_flag = 0; /* We only want to trip the io:::start
1432 * probe once, with the true physical
1433 * block in place (b_blkno)
1434 */
1435
1436 #endif
1437
1438 if (vp == NULL || vp->v_type == VCHR || vp->v_type == VBLK) {
1439 panic("buf_strategy: b_vp == NULL || vtype == VCHR | VBLK");
1440 }
1441 /*
 * associate the physical device
 * with this buf_t even if we don't
1444 * end up issuing the I/O...
1445 */
1446 bp->b_dev = devvp->v_rdev;
1447
1448 if (bp->b_flags & B_READ) {
1449 bmap_flags = VNODE_READ;
1450 } else {
1451 bmap_flags = VNODE_WRITE;
1452 }
1453
1454 if (!(bp->b_flags & B_CLUSTER)) {
1455 if ((bp->b_upl)) {
1456 /*
1457 * we have a UPL associated with this bp
1458 * go through cluster_bp which knows how
1459 * to deal with filesystem block sizes
1460 * that aren't equal to the page size
1461 */
1462 DTRACE_IO1(start, buf_t, bp);
1463 return cluster_bp(bp);
1464 }
1465 if (bp->b_blkno == bp->b_lblkno) {
1466 off_t f_offset;
1467 size_t contig_bytes;
1468
1469 if (bp->b_lblksize && bp->b_lblkno >= 0) {
1470 f_offset = bp->b_lblkno * bp->b_lblksize;
1471 } else if ((error = VNOP_BLKTOOFF(vp, bp->b_lblkno, &f_offset))) {
1472 DTRACE_IO1(start, buf_t, bp);
1473 buf_seterror(bp, error);
1474 buf_biodone(bp);
1475
1476 return error;
1477 }
1478
1479 if ((error = VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL))) {
1480 DTRACE_IO1(start, buf_t, bp);
1481 buf_seterror(bp, error);
1482 buf_biodone(bp);
1483
1484 return error;
1485 }
1486
1487 DTRACE_IO1(start, buf_t, bp);
1488 #if CONFIG_DTRACE
1489 dtrace_io_start_flag = 1;
1490 #endif /* CONFIG_DTRACE */
1491
1492 if ((bp->b_blkno == -1) || (contig_bytes == 0)) {
1493 /* Set block number to force biodone later */
1494 bp->b_blkno = -1;
1495 buf_clear(bp);
1496 } else if (contig_bytes < (size_t)bp->b_bcount) {
1497 return buf_strategy_fragmented(devvp, bp, f_offset, contig_bytes);
1498 }
1499 }
1500
1501 #if CONFIG_DTRACE
1502 if (dtrace_io_start_flag == 0) {
1503 DTRACE_IO1(start, buf_t, bp);
1504 dtrace_io_start_flag = 1;
1505 }
1506 #endif /* CONFIG_DTRACE */
1507
1508 if (bp->b_blkno == -1) {
1509 buf_biodone(bp);
1510 return 0;
1511 }
1512 }
1513
1514 #if CONFIG_DTRACE
1515 if (dtrace_io_start_flag == 0) {
1516 DTRACE_IO1(start, buf_t, bp);
1517 }
1518 #endif /* CONFIG_DTRACE */
1519
1520 #if CONFIG_PROTECT
1521 /* Capture f_offset in the bufattr*/
1522 cpx_t cpx = bufattr_cpx(buf_attr(bp));
1523 if (cpx) {
1524 /* No need to go here for older EAs */
1525 if (cpx_use_offset_for_iv(cpx) && !cpx_synthetic_offset_for_iv(cpx)) {
1526 off_t f_offset;
1527
1528 if (bp->b_flags & B_CLUSTER) {
1529 f_offset = bp->b_lblkno * CLUSTER_IO_BLOCK_SIZE;
1530 } else if ((error = VNOP_BLKTOOFF(bp->b_vp, bp->b_lblkno, &f_offset))) {
1531 return error;
1532 }
1533
1534 /*
1535 * Attach the file offset to this buffer. The
1536 * bufattr attributes will be passed down the stack
1537 * until they reach the storage driver (whether
1538 * IOFlashStorage, ASP, or IONVMe). The driver
1539 * will retain the offset in a local variable when it
1540 * issues its I/Os to the NAND controller.
1541 *
1542 * Note that LwVM may end up splitting this I/O
1543 * into sub-I/Os if it crosses a chunk boundary. In this
1544 * case, LwVM will update this field when it dispatches
1545 * each I/O to IOFlashStorage. But from our perspective
1546 * we have only issued a single I/O.
1547 *
1548 * In the case of APFS we do not bounce through another
1549 * intermediate layer (such as CoreStorage). APFS will
1550 * issue the I/Os directly to the block device / IOMedia
1551 * via buf_strategy on the specfs node.
1552 */
1553 buf_setcpoff(bp, f_offset);
1554 CP_DEBUG((CPDBG_OFFSET_IO | DBG_FUNC_NONE), (uint32_t) f_offset, (uint32_t) bp->b_lblkno, (uint32_t) bp->b_blkno, (uint32_t) bp->b_bcount, 0);
1555 }
1556 }
1557 #endif
1558
1559 /*
1560 * we can issue the I/O because...
1561 * either B_CLUSTER is set which
1562 * means that the I/O is properly set
1563 * up to be a multiple of the page size, or
1564 * we were able to successfully set up the
1565 * physical block mapping
1566 */
1567 bp->b_vnop_vp = devvp;
1568 error = VOCALL(devvp->v_op, VOFFSET(vnop_strategy), ap);
1569 bp->b_vnop_vp = NULLVP;
1570 DTRACE_FSINFO(strategy, vnode_t, vp);
1571 return error;
1572 }
1573
1574
1575
1576 buf_t
buf_alloc(vnode_t vp)
1578 {
1579 return alloc_io_buf(vp, is_vm_privileged());
1580 }
1581
1582 void
buf_free(buf_t bp)
1584 {
1585 free_io_buf(bp);
1586 }
1587
1588
1589 /*
1590 * iterate buffers for the specified vp.
1591 * if BUF_SCAN_DIRTY is set, do the dirty list
1592 * if BUF_SCAN_CLEAN is set, do the clean list
1593 * if neither flag is set, default to BUF_SCAN_DIRTY
1594 * if BUF_NOTIFY_BUSY is set, call the callout function using a NULL bp for busy pages
1595 */
1596
1597 struct buf_iterate_info_t {
1598 int flag;
1599 struct buflists *listhead;
1600 };
1601
1602 void
buf_iterate(vnode_t vp, int (*callout)(buf_t, void *), int flags, void *arg)
1604 {
1605 buf_t bp;
1606 int retval;
1607 struct buflists local_iterblkhd;
1608 int lock_flags = BAC_NOWAIT | BAC_REMOVE;
1609 int notify_busy = flags & BUF_NOTIFY_BUSY;
1610 struct buf_iterate_info_t list[2];
1611 int num_lists, i;
1612
1613 if (flags & BUF_SKIP_LOCKED) {
1614 lock_flags |= BAC_SKIP_LOCKED;
1615 }
1616 if (flags & BUF_SKIP_NONLOCKED) {
1617 lock_flags |= BAC_SKIP_NONLOCKED;
1618 }
1619
1620 if (!(flags & (BUF_SCAN_DIRTY | BUF_SCAN_CLEAN))) {
1621 flags |= BUF_SCAN_DIRTY;
1622 }
1623
1624 num_lists = 0;
1625
1626 if (flags & BUF_SCAN_DIRTY) {
1627 list[num_lists].flag = VBI_DIRTY;
1628 list[num_lists].listhead = &vp->v_dirtyblkhd;
1629 num_lists++;
1630 }
1631 if (flags & BUF_SCAN_CLEAN) {
1632 list[num_lists].flag = VBI_CLEAN;
1633 list[num_lists].listhead = &vp->v_cleanblkhd;
1634 num_lists++;
1635 }
1636
1637 for (i = 0; i < num_lists; i++) {
1638 lck_mtx_lock(&buf_mtx);
1639
1640 if (buf_iterprepare(vp, &local_iterblkhd, list[i].flag)) {
1641 lck_mtx_unlock(&buf_mtx);
1642 continue;
1643 }
1644 while (!LIST_EMPTY(&local_iterblkhd)) {
1645 bp = LIST_FIRST(&local_iterblkhd);
1646 LIST_REMOVE(bp, b_vnbufs);
1647 LIST_INSERT_HEAD(list[i].listhead, bp, b_vnbufs);
1648
1649 if (buf_acquire_locked(bp, lock_flags, 0, 0)) {
1650 if (notify_busy) {
1651 bp = NULL;
1652 } else {
1653 continue;
1654 }
1655 }
1656
1657 lck_mtx_unlock(&buf_mtx);
1658
1659 retval = callout(bp, arg);
1660
1661 switch (retval) {
1662 case BUF_RETURNED:
1663 if (bp) {
1664 buf_brelse(bp);
1665 }
1666 break;
1667 case BUF_CLAIMED:
1668 break;
1669 case BUF_RETURNED_DONE:
1670 if (bp) {
1671 buf_brelse(bp);
1672 }
1673 lck_mtx_lock(&buf_mtx);
1674 goto out;
1675 case BUF_CLAIMED_DONE:
1676 lck_mtx_lock(&buf_mtx);
1677 goto out;
1678 }
1679 lck_mtx_lock(&buf_mtx);
1680 } /* while list has more nodes */
1681 out:
1682 buf_itercomplete(vp, &local_iterblkhd, list[i].flag);
1683 lck_mtx_unlock(&buf_mtx);
1684 } /* for each list */
1685 } /* buf_iterate */
1686
1687
1688 /*
1689 * Flush out and invalidate all buffers associated with a vnode.
1690 */
1691 int
buf_invalidateblks(vnode_t vp, int flags, int slpflag, int slptimeo)
1693 {
1694 buf_t bp;
1695 int aflags;
1696 int error = 0;
1697 int must_rescan = 1;
1698 struct buflists local_iterblkhd;
1699
1700
1701 if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd)) {
1702 return 0;
1703 }
1704
1705 lck_mtx_lock(&buf_mtx);
1706
1707 for (;;) {
1708 if (must_rescan == 0) {
1709 /*
1710 * the lists may not be empty, but all that's left at this
1711 * point are metadata or B_LOCKED buffers which are being
1712 * skipped... we know this because we made it through both
1713 * the clean and dirty lists without dropping buf_mtx...
1714 * each time we drop buf_mtx we bump "must_rescan"
1715 */
1716 break;
1717 }
1718 if (LIST_EMPTY(&vp->v_cleanblkhd) && LIST_EMPTY(&vp->v_dirtyblkhd)) {
1719 break;
1720 }
1721 must_rescan = 0;
1722 /*
1723 * iterate the clean list
1724 */
1725 if (buf_iterprepare(vp, &local_iterblkhd, VBI_CLEAN)) {
1726 goto try_dirty_list;
1727 }
1728 while (!LIST_EMPTY(&local_iterblkhd)) {
1729 bp = LIST_FIRST(&local_iterblkhd);
1730
1731 LIST_REMOVE(bp, b_vnbufs);
1732 LIST_INSERT_HEAD(&vp->v_cleanblkhd, bp, b_vnbufs);
1733
1734 /*
1735 * some filesystems distinguish meta data blocks with a negative logical block #
1736 */
1737 if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) {
1738 continue;
1739 }
1740
1741 aflags = BAC_REMOVE;
1742
1743 if (!(flags & BUF_INVALIDATE_LOCKED)) {
1744 aflags |= BAC_SKIP_LOCKED;
1745 }
1746
1747 if ((error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo))) {
1748 if (error == EDEADLK) {
1749 /*
1750 * this buffer was marked B_LOCKED...
1751 * we didn't drop buf_mtx, so we
 * we didn't drop buf_mtx, so
 * we don't need to rescan
1754 continue;
1755 }
1756 if (error == EAGAIN) {
1757 /*
1758 * found a busy buffer... we blocked and
1759 * dropped buf_mtx, so we're going to
1760 * need to rescan after this pass is completed
1761 */
1762 must_rescan++;
1763 continue;
1764 }
1765 /*
1766 * got some kind of 'real' error out of the msleep
1767 * in buf_acquire_locked, terminate the scan and return the error
1768 */
1769 buf_itercomplete(vp, &local_iterblkhd, VBI_CLEAN);
1770
1771 lck_mtx_unlock(&buf_mtx);
1772 return error;
1773 }
1774 lck_mtx_unlock(&buf_mtx);
1775
1776 if (bp->b_flags & B_LOCKED) {
1777 KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 0, 0);
1778 }
1779
1780 CLR(bp->b_flags, B_LOCKED);
1781 SET(bp->b_flags, B_INVAL);
1782 buf_brelse(bp);
1783
1784 lck_mtx_lock(&buf_mtx);
1785
1786 /*
1787 * by dropping buf_mtx, we allow new
1788 * buffers to be added to the vnode list(s)
1789 * we'll have to rescan at least once more
1790 * if the queues aren't empty
1791 */
1792 must_rescan++;
1793 }
1794 buf_itercomplete(vp, &local_iterblkhd, VBI_CLEAN);
1795
1796 try_dirty_list:
1797 /*
1798 * Now iterate on dirty blks
1799 */
1800 if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY)) {
1801 continue;
1802 }
1803 while (!LIST_EMPTY(&local_iterblkhd)) {
1804 bp = LIST_FIRST(&local_iterblkhd);
1805
1806 LIST_REMOVE(bp, b_vnbufs);
1807 LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);
1808
1809 /*
1810 * some filesystems distinguish meta data blocks with a negative logical block #
1811 */
1812 if ((flags & BUF_SKIP_META) && (bp->b_lblkno < 0 || ISSET(bp->b_flags, B_META))) {
1813 continue;
1814 }
1815
1816 aflags = BAC_REMOVE;
1817
1818 if (!(flags & BUF_INVALIDATE_LOCKED)) {
1819 aflags |= BAC_SKIP_LOCKED;
1820 }
1821
1822 if ((error = (int)buf_acquire_locked(bp, aflags, slpflag, slptimeo))) {
1823 if (error == EDEADLK) {
1824 /*
1825 * this buffer was marked B_LOCKED...
1826 * we didn't drop buf_mtx, so we
 * we didn't drop buf_mtx, so
 * we don't need to rescan
1829 continue;
1830 }
1831 if (error == EAGAIN) {
1832 /*
1833 * found a busy buffer... we blocked and
1834 * dropped buf_mtx, so we're going to
1835 * need to rescan after this pass is completed
1836 */
1837 must_rescan++;
1838 continue;
1839 }
1840 /*
1841 * got some kind of 'real' error out of the msleep
1842 * in buf_acquire_locked, terminate the scan and return the error
1843 */
1844 buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
1845
1846 lck_mtx_unlock(&buf_mtx);
1847 return error;
1848 }
1849 lck_mtx_unlock(&buf_mtx);
1850
1851 if (bp->b_flags & B_LOCKED) {
1852 KERNEL_DEBUG(0xbbbbc038, bp, 0, 0, 1, 0);
1853 }
1854
1855 CLR(bp->b_flags, B_LOCKED);
1856 SET(bp->b_flags, B_INVAL);
1857
1858 if (ISSET(bp->b_flags, B_DELWRI) && (flags & BUF_WRITE_DATA)) {
1859 (void) VNOP_BWRITE(bp);
1860 } else {
1861 buf_brelse(bp);
1862 }
1863
1864 lck_mtx_lock(&buf_mtx);
1865 /*
1866 * by dropping buf_mtx, we allow new
1867 * buffers to be added to the vnode list(s)
1868 * we'll have to rescan at least once more
1869 * if the queues aren't empty
1870 */
1871 must_rescan++;
1872 }
1873 buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
1874 }
1875 lck_mtx_unlock(&buf_mtx);
1876
1877 return 0;
1878 }
1879
1880 void
buf_flushdirtyblks(vnode_t vp, int wait, int flags, const char *msg)
1882 {
1883 (void) buf_flushdirtyblks_skipinfo(vp, wait, flags, msg);
1884 return;
1885 }
1886
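/*
 * Flush a vnode's dirty buffers, honoring the BUF_SKIP_* flags; if 'wait'
 * is set, wait for outstanding writes to drain and rescan as needed.
 * Returns non-zero if any B_LOCKED buffers were skipped (buf_acquire_locked
 * returned EDEADLK), which some callers use for diagnostics.
 */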
1887 int
buf_flushdirtyblks_skipinfo(vnode_t vp, int wait, int flags, const char *msg)
1889 {
1890 buf_t bp;
1891 int writes_issued = 0;
1892 errno_t error;
1893 int busy = 0;
1894 struct buflists local_iterblkhd;
1895 int lock_flags = BAC_NOWAIT | BAC_REMOVE;
1896 int any_locked = 0;
1897
1898 if (flags & BUF_SKIP_LOCKED) {
1899 lock_flags |= BAC_SKIP_LOCKED;
1900 }
1901 if (flags & BUF_SKIP_NONLOCKED) {
1902 lock_flags |= BAC_SKIP_NONLOCKED;
1903 }
1904 loop:
1905 lck_mtx_lock(&buf_mtx);
1906
1907 if (buf_iterprepare(vp, &local_iterblkhd, VBI_DIRTY) == 0) {
1908 while (!LIST_EMPTY(&local_iterblkhd)) {
1909 bp = LIST_FIRST(&local_iterblkhd);
1910 LIST_REMOVE(bp, b_vnbufs);
1911 LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);
1912
1913 if ((error = buf_acquire_locked(bp, lock_flags, 0, 0)) == EBUSY) {
1914 busy++;
1915 }
1916 if (error) {
1917 /*
1918 * If we passed in BUF_SKIP_LOCKED or BUF_SKIP_NONLOCKED,
 * we may want to do something differently if a locked or unlocked
1920 * buffer was encountered (depending on the arg specified).
1921 * In this case, we know that one of those two was set, and the
1922 * buf acquisition failed above.
1923 *
1924 * If it failed with EDEADLK, then save state which can be emitted
1925 * later on to the caller. Most callers should not care.
1926 */
1927 if (error == EDEADLK) {
1928 any_locked++;
1929 }
1930 continue;
1931 }
1932 lck_mtx_unlock(&buf_mtx);
1933
1934 bp->b_flags &= ~B_LOCKED;
1935
1936 /*
1937 * Wait for I/O associated with indirect blocks to complete,
1938 * since there is no way to quickly wait for them below.
1939 */
1940 if ((bp->b_vp == vp) || (wait == 0)) {
1941 (void) buf_bawrite(bp);
1942 } else {
1943 (void) VNOP_BWRITE(bp);
1944 }
1945 writes_issued++;
1946
1947 lck_mtx_lock(&buf_mtx);
1948 }
1949 buf_itercomplete(vp, &local_iterblkhd, VBI_DIRTY);
1950 }
1951 lck_mtx_unlock(&buf_mtx);
1952
1953 if (wait) {
1954 (void)vnode_waitforwrites(vp, 0, 0, 0, msg);
1955
1956 if (vp->v_dirtyblkhd.lh_first && busy) {
1957 /*
1958 * we had one or more BUSY buffers on
1959 * the dirtyblock list... most likely
1960 * these are due to delayed writes that
1961 * were moved to the bclean queue but
1962 * have not yet been 'written'.
1963 * if we issued some writes on the
1964 * previous pass, we try again immediately
1965 * if we didn't, we'll sleep for some time
1966 * to allow the state to change...
1967 */
1968 if (writes_issued == 0) {
1969 (void)tsleep((caddr_t)&vp->v_numoutput,
1970 PRIBIO + 1, "vnode_flushdirtyblks", hz / 20);
1971 }
1972 writes_issued = 0;
1973 busy = 0;
1974
1975 goto loop;
1976 }
1977 }
1978
1979 return any_locked;
1980 }
1981
1982
1983 /*
1984 * called with buf_mtx held...
1985 * this lock protects the queue manipulation
1986 */
1987 static int
buf_iterprepare(vnode_t vp, struct buflists *iterheadp, int flags)
1989 {
1990 struct buflists * listheadp;
1991
1992 if (flags & VBI_DIRTY) {
1993 listheadp = &vp->v_dirtyblkhd;
1994 } else {
1995 listheadp = &vp->v_cleanblkhd;
1996 }
1997
1998 while (vp->v_iterblkflags & VBI_ITER) {
1999 vp->v_iterblkflags |= VBI_ITERWANT;
2000 msleep(&vp->v_iterblkflags, &buf_mtx, 0, "buf_iterprepare", NULL);
2001 }
2002 if (LIST_EMPTY(listheadp)) {
2003 LIST_INIT(iterheadp);
2004 return EINVAL;
2005 }
2006 vp->v_iterblkflags |= VBI_ITER;
2007
2008 iterheadp->lh_first = listheadp->lh_first;
2009 listheadp->lh_first->b_vnbufs.le_prev = &iterheadp->lh_first;
2010 LIST_INIT(listheadp);
2011
2012 return 0;
2013 }
2014
2015 /*
2016 * called with buf_mtx held...
2017 * this lock protects the queue manipulation
2018 */
2019 static void
buf_itercomplete(vnode_t vp, struct buflists *iterheadp, int flags)
2021 {
2022 struct buflists * listheadp;
2023 buf_t bp;
2024
2025 if (flags & VBI_DIRTY) {
2026 listheadp = &vp->v_dirtyblkhd;
2027 } else {
2028 listheadp = &vp->v_cleanblkhd;
2029 }
2030
2031 while (!LIST_EMPTY(iterheadp)) {
2032 bp = LIST_FIRST(iterheadp);
2033 LIST_REMOVE(bp, b_vnbufs);
2034 LIST_INSERT_HEAD(listheadp, bp, b_vnbufs);
2035 }
2036 vp->v_iterblkflags &= ~VBI_ITER;
2037
2038 if (vp->v_iterblkflags & VBI_ITERWANT) {
2039 vp->v_iterblkflags &= ~VBI_ITERWANT;
2040 wakeup(&vp->v_iterblkflags);
2041 }
2042 }
2043
2044
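/*
 * Remove a buffer from the free queue it is currently on.
 * buf_mtx must be held by the caller.
 */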
2045 static void
bremfree_locked(buf_t bp)
2047 {
2048 struct bqueues *dp = NULL;
2049 int whichq;
2050
2051 whichq = bp->b_whichq;
2052
2053 if (whichq == -1) {
2054 if (bp->b_shadow_ref == 0) {
2055 panic("bremfree_locked: %p not on freelist", bp);
2056 }
2057 /*
2058 * there are clones pointing to 'bp'...
2059 * therefore, it was not put on a freelist
2060 * when buf_brelse was last called on 'bp'
2061 */
2062 return;
2063 }
2064 /*
2065 * We only calculate the head of the freelist when removing
2066 * the last element of the list as that is the only time that
2067 * it is needed (e.g. to reset the tail pointer).
2068 *
2069 * NB: This makes an assumption about how tailq's are implemented.
2070 */
2071 if (bp->b_freelist.tqe_next == NULL) {
2072 dp = &bufqueues[whichq];
2073
2074 if (dp->tqh_last != &bp->b_freelist.tqe_next) {
2075 panic("bremfree: lost tail");
2076 }
2077 }
2078 TAILQ_REMOVE(dp, bp, b_freelist);
2079
2080 if (whichq == BQ_LAUNDRY) {
2081 blaundrycnt--;
2082 }
2083
2084 bp->b_whichq = -1;
2085 bp->b_timestamp = 0;
2086 bp->b_shadow = 0;
2087 }
2088
2089 /*
2090 * Associate a buffer with a vnode.
2091 * buf_mtx must be locked on entry
2092 */
2093 static void
bgetvp_locked(vnode_t vp, buf_t bp)
2095 {
2096 if (bp->b_vp != vp) {
2097 panic("bgetvp_locked: not free");
2098 }
2099
2100 if (vp->v_type == VBLK || vp->v_type == VCHR) {
2101 bp->b_dev = vp->v_rdev;
2102 } else {
2103 bp->b_dev = NODEV;
2104 }
2105 /*
2106 * Insert onto list for new vnode.
2107 */
2108 bufinsvn(bp, &vp->v_cleanblkhd);
2109 }
2110
2111 /*
2112 * Disassociate a buffer from a vnode.
2113 * buf_mtx must be locked on entry
2114 */
2115 static void
brelvp_locked(buf_t bp)
2117 {
2118 /*
2119 * Delete from old vnode list, if on one.
2120 */
2121 if (bp->b_vnbufs.le_next != NOLIST) {
2122 bufremvn(bp);
2123 }
2124
2125 bp->b_vp = (vnode_t)NULL;
2126 }
2127
2128 /*
2129 * Reassign a buffer from one vnode to another.
2130 * Used to assign file specific control information
2131 * (indirect blocks) to the vnode to which they belong.
2132 */
2133 static void
buf_reassign(buf_t bp, vnode_t newvp)
2135 {
2136 struct buflists *listheadp;
2137
2138 if (newvp == NULL) {
2139 printf("buf_reassign: NULL");
2140 return;
2141 }
2142 lck_mtx_lock_spin(&buf_mtx);
2143
2144 /*
2145 * Delete from old vnode list, if on one.
2146 */
2147 if (bp->b_vnbufs.le_next != NOLIST) {
2148 bufremvn(bp);
2149 }
2150 /*
2151 * If dirty, put on list of dirty buffers;
2152 * otherwise insert onto list of clean buffers.
2153 */
2154 if (ISSET(bp->b_flags, B_DELWRI)) {
2155 listheadp = &newvp->v_dirtyblkhd;
2156 } else {
2157 listheadp = &newvp->v_cleanblkhd;
2158 }
2159 bufinsvn(bp, listheadp);
2160
2161 lck_mtx_unlock(&buf_mtx);
2162 }
2163
2164 static __inline__ void
2165 bufhdrinit(buf_t bp)
2166 {
2167 bzero((char *)bp, sizeof *bp);
2168 bp->b_dev = NODEV;
2169 bp->b_rcred = NOCRED;
2170 bp->b_wcred = NOCRED;
2171 bp->b_vnbufs.le_next = NOLIST;
2172 bp->b_flags = B_INVAL;
2173
2174 return;
2175 }
2176
2177 /*
2178 * Initialize buffers and hash links for buffers.
2179 */
2180 __private_extern__ void
2181 bufinit(void)
2182 {
2183 buf_t bp;
2184 struct bqueues *dp;
2185 int i;
2186
2187 nbuf_headers = 0;
2188 /* Initialize the buffer queues ('freelists') and the hash table */
2189 for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++) {
2190 TAILQ_INIT(dp);
2191 }
2192 bufhashtbl = hashinit(nbuf_hashelements, M_CACHE, &bufhash);
2193
2194 buf_busycount = 0;
2195
2196 /* Initialize the buffer headers */
2197 for (i = 0; i < max_nbuf_headers; i++) {
2198 nbuf_headers++;
2199 bp = &buf_headers[i];
2200 bufhdrinit(bp);
2201
2202 BLISTNONE(bp);
2203 dp = &bufqueues[BQ_EMPTY];
2204 bp->b_whichq = BQ_EMPTY;
2205 bp->b_timestamp = buf_timestamp();
2206 binsheadfree(bp, dp, BQ_EMPTY);
2207 binshash(bp, &invalhash);
2208 }
2209 boot_nbuf_headers = nbuf_headers;
2210
2211 TAILQ_INIT(&iobufqueue);
2212 TAILQ_INIT(&delaybufqueue);
2213
2214 for (; i < nbuf_headers + niobuf_headers; i++) {
2215 bp = &buf_headers[i];
2216 bufhdrinit(bp);
2217 bp->b_whichq = -1;
2218 binsheadfree(bp, &iobufqueue, -1);
2219 }
2220
2221 /*
2222 * allocate and initialize cluster specific global locks...
2223 */
2224 cluster_init();
2225
2226 printf("using %d buffer headers and %d cluster IO buffer headers\n",
2227 nbuf_headers, niobuf_headers);
2228
2229 /* start the bcleanbuf() thread */
2230 bcleanbuf_thread_init();
2231
2232 /* Register a callout for relieving vm pressure */
2233 if (vm_set_buffer_cleanup_callout(buffer_cache_gc) != KERN_SUCCESS) {
2234 panic("Couldn't register buffer cache callout for vm pressure!");
2235 }
2236 }
2237
2238 /*
2239 * Zones for the meta data buffers
2240 */
2241
2242 #define MINMETA 512
2243 #define MAXMETA 16384
2244
2245 KALLOC_HEAP_DEFINE(KHEAP_VFS_BIO, "vfs_bio", KHEAP_ID_DATA_BUFFERS);
2246
2247 static struct buf *
2248 bio_doread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, int async, int queuetype)
2249 {
2250 buf_t bp;
2251
2252 bp = buf_getblk(vp, blkno, size, 0, 0, queuetype);
2253
2254 /*
2255 * If buffer does not have data valid, start a read.
2256 * Note that if buffer is B_INVAL, buf_getblk() won't return it.
2257 * Therefore, it's valid if its I/O has completed or been delayed.
2258 */
2259 if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
2260 struct proc *p;
2261
2262 p = current_proc();
2263
2264 /* Start I/O for the buffer (keeping credentials). */
2265 SET(bp->b_flags, B_READ | async);
2266 if (IS_VALID_CRED(cred) && !IS_VALID_CRED(bp->b_rcred)) {
2267 kauth_cred_ref(cred);
2268 bp->b_rcred = cred;
2269 }
2270
2271 VNOP_STRATEGY(bp);
2272
2273 trace(TR_BREADMISS, pack(vp, size), blkno);
2274
2275 /* Pay for the read. */
2276 if (p && p->p_stats) {
2277 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_inblock); /* XXX */
2278 }
2279
2280 if (async) {
2281 /*
2282 * since we asked for an ASYNC I/O
2283 * the biodone will do the brelse
2284 * we don't want to pass back a bp
2285 * that we don't 'own'
2286 */
2287 bp = NULL;
2288 }
2289 } else if (async) {
2290 buf_brelse(bp);
2291 bp = NULL;
2292 }
2293
2294 trace(TR_BREADHIT, pack(vp, size), blkno);
2295
2296 return bp;
2297 }
2298
2299 /*
2300 * Perform the reads for buf_breadn() and buf_meta_breadn().
2301 * Trivial modification to the breada algorithm presented in Bach (p.55).
2302 */
2303 static errno_t
2304 do_breadn_for_type(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes,
2305 int nrablks, kauth_cred_t cred, buf_t *bpp, int queuetype)
2306 {
2307 buf_t bp;
2308 int i;
2309
2310 bp = *bpp = bio_doread(vp, blkno, size, cred, 0, queuetype);
2311
2312 /*
2313 * For each of the read-ahead blocks, start a read, if necessary.
2314 */
2315 for (i = 0; i < nrablks; i++) {
2316 /* If it's in the cache, just go on to next one. */
2317 if (incore(vp, rablks[i])) {
2318 continue;
2319 }
2320
2321 /* Get a buffer for the read-ahead block */
2322 (void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC, queuetype);
2323 }
2324
2325 /* Otherwise, we had to start a read for it; wait until it's valid. */
2326 return buf_biowait(bp);
2327 }
2328
2329
2330 /*
2331 * Read a disk block.
2332 * This algorithm is described in Bach (p.54).
2333 */
2334 errno_t
2335 buf_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp)
2336 {
2337 buf_t bp;
2338
2339 /* Get buffer for block. */
2340 bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_READ);
2341
2342 /* Wait for the read to complete, and return result. */
2343 return buf_biowait(bp);
2344 }
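/*
 * Minimal usage sketch (comment only, not lifted from a specific caller):
 * a file system reads a block, consumes the data via the buf_dataptr()/
 * buf_count() KPI accessors, and releases the buffer.  'consume' is a
 * hypothetical helper.
 *
 *	buf_t bp;
 *	errno_t error;
 *
 *	error = buf_bread(vp, blkno, blksize, cred, &bp);
 *	if (error) {
 *		buf_brelse(bp);		// a buffer is returned even on error
 *		return error;
 *	}
 *	consume(buf_dataptr(bp), buf_count(bp));
 *	buf_brelse(bp);
 */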
2345
2346 /*
2347 * Read a disk block. [bread() for meta-data]
2348 * This algorithm is described in Bach (p.54).
2349 */
2350 errno_t
2351 buf_meta_bread(vnode_t vp, daddr64_t blkno, int size, kauth_cred_t cred, buf_t *bpp)
2352 {
2353 buf_t bp;
2354
2355 /* Get buffer for block. */
2356 bp = *bpp = bio_doread(vp, blkno, size, cred, 0, BLK_META);
2357
2358 /* Wait for the read to complete, and return result. */
2359 return buf_biowait(bp);
2360 }
2361
2362 /*
2363 * Read-ahead multiple disk blocks. The first is sync, the rest async.
2364 */
2365 errno_t
2366 buf_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp)
2367 {
2368 return do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_READ);
2369 }
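/*
 * Usage sketch (comment only): the caller owns only *bpp; the read-ahead
 * buffers are issued B_ASYNC and are released by buf_biodone()/buf_brelse()
 * when their I/O completes.  The block numbers below are illustrative.
 *
 *	daddr64_t ra_blks[2]  = { blkno + 1, blkno + 2 };
 *	int       ra_sizes[2] = { blksize, blksize };
 *	buf_t     bp;
 *
 *	error = buf_breadn(vp, blkno, blksize, ra_blks, ra_sizes, 2, cred, &bp);
 *	...
 *	buf_brelse(bp);		// release *bpp whether or not error is set
 */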
2370
2371 /*
2372 * Read-ahead multiple disk blocks. The first is sync, the rest async.
2373 * [buf_breadn() for meta-data]
2374 */
2375 errno_t
2376 buf_meta_breadn(vnode_t vp, daddr64_t blkno, int size, daddr64_t *rablks, int *rasizes, int nrablks, kauth_cred_t cred, buf_t *bpp)
2377 {
2378 return do_breadn_for_type(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp, BLK_META);
2379 }
2380
2381 /*
2382 * Block write. Described in Bach (p.56)
2383 */
2384 errno_t
2385 buf_bwrite(buf_t bp)
2386 {
2387 int sync, wasdelayed;
2388 errno_t rv;
2389 proc_t p = current_proc();
2390 vnode_t vp = bp->b_vp;
2391
2392 if (bp->b_datap == 0) {
2393 if (brecover_data(bp) == 0) {
2394 return 0;
2395 }
2396 }
2397 /* Remember buffer type, to switch on it later. */
2398 sync = !ISSET(bp->b_flags, B_ASYNC);
2399 wasdelayed = ISSET(bp->b_flags, B_DELWRI);
2400 CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));
2401
2402 if (wasdelayed) {
2403 OSAddAtomicLong(-1, &nbdwrite);
2404 }
2405
2406 if (!sync) {
2407 /*
2408 * If not synchronous, pay for the I/O operation and make
2409 * sure the buf is on the correct vnode queue. We have
2410 * to do this now, because if we don't, the vnode may not
2411 * be properly notified that its I/O has completed.
2412 */
2413 if (wasdelayed) {
2414 buf_reassign(bp, vp);
2415 } else if (p && p->p_stats) {
2416 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
2417 }
2418 }
2419 trace(TR_BUFWRITE, pack(vp, bp->b_bcount), bp->b_lblkno);
2420
2421 /* Initiate disk write. Make sure the appropriate party is charged. */
2422
2423 OSAddAtomic(1, &vp->v_numoutput);
2424
2425 VNOP_STRATEGY(bp);
2426
2427 if (sync) {
2428 /*
2429 * If I/O was synchronous, wait for it to complete.
2430 */
2431 rv = buf_biowait(bp);
2432
2433 /*
2434 * Pay for the I/O operation, if it's not been paid for, and
2435 * make sure it's on the correct vnode queue. (async operations
2436 * were paid for above.)
2437 */
2438 if (wasdelayed) {
2439 buf_reassign(bp, vp);
2440 } else if (p && p->p_stats) {
2441 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
2442 }
2443
2444 /* Release the buffer. */
2445 buf_brelse(bp);
2446
2447 return rv;
2448 } else {
2449 return 0;
2450 }
2451 }
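/*
 * Behavioral note with a small sketch (comment only): buf_bwrite() is
 * synchronous unless B_ASYNC is already set on the buffer.
 *
 *	error = buf_bwrite(bp);		// waits in buf_biowait(), then buf_brelse()s bp
 *
 *	SET(bp->b_flags, B_ASYNC);
 *	(void) buf_bwrite(bp);		// returns 0 right away; buf_biodone() releases bp
 */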
2452
2453 int
2454 vn_bwrite(struct vnop_bwrite_args *ap)
2455 {
2456 return buf_bwrite(ap->a_bp);
2457 }
2458
2459 /*
2460 * Delayed write.
2461 *
2462 * The buffer is marked dirty, but is not queued for I/O.
2463 * This routine should be used when the buffer is expected
2464 * to be modified again soon, typically a small write that
2465 * partially fills a buffer.
2466 *
2467 * NB: magnetic tapes cannot be delayed; they must be
2468 * written in the order that the writes are requested.
2469 *
2470 * Described in Leffler, et al. (pp. 208-213).
2471 *
2472 * Note: With the ability to allocate additional buffer
2473 * headers, we can get into a situation where "too many"
2474 * buf_bdwrite()s let the kernel create
2475 * buffers faster than the disks can service them. Doing a buf_bawrite() in
2476 * cases where we have "too many" outstanding buf_bdwrite()s avoids that.
2477 */
2478 int
2479 bdwrite_internal(buf_t bp, int return_error)
2480 {
2481 proc_t p = current_proc();
2482 vnode_t vp = bp->b_vp;
2483
2484 /*
2485 * If the block hasn't been seen before:
2486 * (1) Mark it as having been seen,
2487 * (2) Charge for the write, and
2488 * (3) Make sure it's on its vnode's correct block list.
2489 */
2490 if (!ISSET(bp->b_flags, B_DELWRI)) {
2491 SET(bp->b_flags, B_DELWRI);
2492 if (p && p->p_stats) {
2493 OSIncrementAtomicLong(&p->p_stats->p_ru.ru_oublock); /* XXX */
2494 }
2495 OSAddAtomicLong(1, &nbdwrite);
2496 buf_reassign(bp, vp);
2497 }
2498
2499 /*
2500 * if we're not LOCKED, but the total number of delayed writes
2501 * has climbed above 75% of the total buffers in the system,
2502 * return an error if the caller has indicated that it can
2503 * handle one in this case; otherwise schedule the I/O now.
2504 * This is done to prevent us from allocating tons of extra
2505 * buffers when dealing with virtual disks (i.e. DiskImages),
2506 * because additional buffers are dynamically allocated to prevent
2507 * deadlocks from occurring.
2508 *
2509 * However, we can't do a buf_bawrite() if the LOCKED bit is set because the
2510 * buffer is part of a transaction and can't go to disk until
2511 * the LOCKED bit is cleared.
2512 */
2513 if (!ISSET(bp->b_flags, B_LOCKED) && nbdwrite > ((nbuf_headers / 4) * 3)) {
2514 if (return_error) {
2515 return EAGAIN;
2516 }
2517 /*
2518 * If the vnode has "too many" write operations in progress,
2519 * wait for them to finish their I/O
2520 */
2521 (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, "buf_bdwrite");
2522
2523 return buf_bawrite(bp);
2524 }
2525
2526 /* Otherwise, the "write" is done, so mark and release the buffer. */
2527 SET(bp->b_flags, B_DONE);
2528 buf_brelse(bp);
2529 return 0;
2530 }
2531
2532 errno_t
2533 buf_bdwrite(buf_t bp)
2534 {
2535 return bdwrite_internal(bp, 0);
2536 }
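/*
 * Usage sketch (comment only): a small metadata update that is likely to
 * be modified again soon is the typical buf_bdwrite() customer.
 * 'update_record' is a hypothetical in-place edit.
 *
 *	if ((error = buf_meta_bread(vp, blkno, blksize, cred, &bp)) != 0) {
 *		buf_brelse(bp);
 *		return error;
 *	}
 *	update_record(buf_dataptr(bp));
 *	error = buf_bdwrite(bp);	// marks B_DELWRI and releases bp; no I/O yet
 *					// (unless the 75% threshold above forces a buf_bawrite)
 */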
2537
2538
2539 /*
2540 * Asynchronous block write; just an asynchronous buf_bwrite().
2541 *
2542 * Note: With the ability to allocate additional buffer
2543 * headers, we can get into a situation where "too many"
2544 * buf_bawrite()s let the kernel create
2545 * buffers faster than the disks can service them.
2546 * We limit the number of "in flight" writes a vnode can have to
2547 * avoid this.
2548 */
2549 static int
2550 bawrite_internal(buf_t bp, int throttle)
2551 {
2552 vnode_t vp = bp->b_vp;
2553
2554 if (vp) {
2555 if (throttle) {
2556 /*
2557 * If the vnode has "too many" write operations in progress,
2558 * wait for them to finish their I/O
2559 */
2560 (void)vnode_waitforwrites(vp, VNODE_ASYNC_THROTTLE, 0, 0, (const char *)"buf_bawrite");
2561 } else if (vp->v_numoutput >= VNODE_ASYNC_THROTTLE) {
2562 /*
2563 * return to the caller and
2564 * let him decide what to do
2565 */
2566 return EWOULDBLOCK;
2567 }
2568 }
2569 SET(bp->b_flags, B_ASYNC);
2570
2571 return VNOP_BWRITE(bp);
2572 }
2573
2574 errno_t
2575 buf_bawrite(buf_t bp)
2576 {
2577 return bawrite_internal(bp, 1);
2578 }
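/*
 * Descriptive note: buf_bawrite() passes throttle == 1 and therefore may
 * sleep in vnode_waitforwrites() when the vnode already has
 * VNODE_ASYNC_THROTTLE writes in flight; bawrite_internal(bp, 0) instead
 * returns EWOULDBLOCK so the caller can decide what to do.
 */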
2579
2580
2581
2582 static void
2583 buf_free_meta_store(buf_t bp)
2584 {
2585 if (bp->b_bufsize) {
2586 uintptr_t datap = bp->b_datap;
2587 int bufsize = bp->b_bufsize;
2588
2589 bp->b_datap = (uintptr_t)NULL;
2590 bp->b_bufsize = 0;
2591
2592 /*
2593 * Ensure the assignment of b_datap has global visibility
2594 * before we free the region.
2595 */
2596 OSMemoryBarrier();
2597
2598 if (ISSET(bp->b_flags, B_ZALLOC)) {
2599 kheap_free(KHEAP_VFS_BIO, datap, bufsize);
2600 } else {
2601 kmem_free(kernel_map, datap, bufsize);
2602 }
2603 }
2604 }
2605
2606
2607 static buf_t
2608 buf_brelse_shadow(buf_t bp)
2609 {
2610 buf_t bp_head;
2611 buf_t bp_temp;
2612 buf_t bp_return = NULL;
2613 #ifdef BUF_MAKE_PRIVATE
2614 buf_t bp_data;
2615 int data_ref = 0;
2616 #endif
2617 int need_wakeup = 0;
2618
2619 lck_mtx_lock_spin(&buf_mtx);
2620
2621 __IGNORE_WCASTALIGN(bp_head = (buf_t)bp->b_orig);
2622
2623 if (bp_head->b_whichq != -1) {
2624 panic("buf_brelse_shadow: bp_head on freelist %d", bp_head->b_whichq);
2625 }
2626
2627 #ifdef BUF_MAKE_PRIVATE
2628 if ((bp_data = bp->b_data_store)) {
2629 bp_data->b_data_ref--;
2630 /*
2631 * snapshot the ref count so that we can check it
2632 * outside of the lock... we only want the guy going
2633 * from 1 -> 0 to try and release the storage
2634 */
2635 data_ref = bp_data->b_data_ref;
2636 }
2637 #endif
2638 KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_START, bp, bp_head, bp_head->b_shadow_ref, 0, 0);
2639
2640 bp_head->b_shadow_ref--;
2641
2642 for (bp_temp = bp_head; bp_temp && bp != bp_temp->b_shadow; bp_temp = bp_temp->b_shadow) {
2643 ;
2644 }
2645
2646 if (bp_temp == NULL) {
2647 panic("buf_brelse_shadow: bp not on list %p", bp_head);
2648 }
2649
2650 bp_temp->b_shadow = bp_temp->b_shadow->b_shadow;
2651
2652 #ifdef BUF_MAKE_PRIVATE
2653 /*
2654 * we're about to free the current 'owner' of the data buffer and
2655 * there is at least one other shadow buf_t still pointing at it
2656 * so transfer it to the first shadow buf left in the chain
2657 */
2658 if (bp == bp_data && data_ref) {
2659 if ((bp_data = bp_head->b_shadow) == NULL) {
2660 panic("buf_brelse_shadow: data_ref mismatch bp(%p)", bp);
2661 }
2662
2663 for (bp_temp = bp_data; bp_temp; bp_temp = bp_temp->b_shadow) {
2664 bp_temp->b_data_store = bp_data;
2665 }
2666 bp_data->b_data_ref = data_ref;
2667 }
2668 #endif
2669 if (bp_head->b_shadow_ref == 0 && bp_head->b_shadow) {
2670 panic("buf_relse_shadow: b_shadow != NULL && b_shadow_ref == 0 bp(%p)", bp);
2671 }
2672 if (bp_head->b_shadow_ref && bp_head->b_shadow == 0) {
2673 panic("buf_relse_shadow: b_shadow == NULL && b_shadow_ref != 0 bp(%p)", bp);
2674 }
2675
2676 if (bp_head->b_shadow_ref == 0) {
2677 if (!ISSET(bp_head->b_lflags, BL_BUSY)) {
2678 CLR(bp_head->b_flags, B_AGE);
2679 bp_head->b_timestamp = buf_timestamp();
2680
2681 if (ISSET(bp_head->b_flags, B_LOCKED)) {
2682 bp_head->b_whichq = BQ_LOCKED;
2683 binstailfree(bp_head, &bufqueues[BQ_LOCKED], BQ_LOCKED);
2684 } else {
2685 bp_head->b_whichq = BQ_META;
2686 binstailfree(bp_head, &bufqueues[BQ_META], BQ_META);
2687 }
2688 } else if (ISSET(bp_head->b_lflags, BL_WAITSHADOW)) {
2689 CLR(bp_head->b_lflags, BL_WAITSHADOW);
2690
2691 bp_return = bp_head;
2692 }
2693 if (ISSET(bp_head->b_lflags, BL_WANTED_REF)) {
2694 CLR(bp_head->b_lflags, BL_WANTED_REF);
2695 need_wakeup = 1;
2696 }
2697 }
2698 lck_mtx_unlock(&buf_mtx);
2699
2700 if (need_wakeup) {
2701 wakeup(bp_head);
2702 }
2703
2704 #ifdef BUF_MAKE_PRIVATE
2705 if (bp == bp_data && data_ref == 0) {
2706 buf_free_meta_store(bp);
2707 }
2708
2709 bp->b_data_store = NULL;
2710 #endif
2711 KERNEL_DEBUG(0xbbbbc008 | DBG_FUNC_END, bp, 0, 0, 0, 0);
2712
2713 return bp_return;
2714 }
2715
2716
2717 /*
2718 * Release a buffer on to the free lists.
2719 * Described in Bach (p. 46).
2720 */
2721 void
2722 buf_brelse(buf_t bp)
2723 {
2724 struct bqueues *bufq;
2725 int whichq;
2726 upl_t upl;
2727 int need_wakeup = 0;
2728 int need_bp_wakeup = 0;
2729
2730
2731 if (bp->b_whichq != -1 || !(bp->b_lflags & BL_BUSY)) {
2732 panic("buf_brelse: bad buffer = %p", bp);
2733 }
2734
2735 #ifdef JOE_DEBUG
2736 (void) OSBacktrace(&bp->b_stackbrelse[0], 6);
2737
2738 bp->b_lastbrelse = current_thread();
2739 bp->b_tag = 0;
2740 #endif
2741 if (bp->b_lflags & BL_IOBUF) {
2742 buf_t shadow_master_bp = NULL;
2743
2744 if (ISSET(bp->b_lflags, BL_SHADOW)) {
2745 shadow_master_bp = buf_brelse_shadow(bp);
2746 } else if (ISSET(bp->b_lflags, BL_IOBUF_ALLOC)) {
2747 buf_free_meta_store(bp);
2748 }
2749 free_io_buf(bp);
2750
2751 if (shadow_master_bp) {
2752 bp = shadow_master_bp;
2753 goto finish_shadow_master;
2754 }
2755 return;
2756 }
2757
2758 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_START,
2759 bp->b_lblkno * PAGE_SIZE, bp, bp->b_datap,
2760 bp->b_flags, 0);
2761
2762 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
2763
2764 /*
2765 * if we're invalidating a buffer that has the B_FILTER bit
2766 * set then call the b_iodone function so it gets cleaned
2767 * up properly.
2768 *
2769 * the HFS journal code depends on this
2770 */
2771 if (ISSET(bp->b_flags, B_META) && ISSET(bp->b_flags, B_INVAL)) {
2772 if (ISSET(bp->b_flags, B_FILTER)) { /* if necessary, call out */
2773 void (*iodone_func)(struct buf *, void *) = bp->b_iodone;
2774 void *arg = bp->b_transaction;
2775
2776 CLR(bp->b_flags, B_FILTER); /* but note callout done */
2777 bp->b_iodone = NULL;
2778 bp->b_transaction = NULL;
2779
2780 if (iodone_func == NULL) {
2781 panic("brelse: bp @ %p has NULL b_iodone!", bp);
2782 }
2783 (*iodone_func)(bp, arg);
2784 }
2785 }
2786 /*
2787 * I/O is done. Cleanup the UPL state
2788 */
2789 upl = bp->b_upl;
2790
2791 if (!ISSET(bp->b_flags, B_META) && UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
2792 kern_return_t kret;
2793 int upl_flags;
2794
2795 if (upl == NULL) {
2796 if (!ISSET(bp->b_flags, B_INVAL)) {
2797 kret = ubc_create_upl_kernel(bp->b_vp,
2798 ubc_blktooff(bp->b_vp, bp->b_lblkno),
2799 bp->b_bufsize,
2800 &upl,
2801 NULL,
2802 UPL_PRECIOUS,
2803 VM_KERN_MEMORY_FILE);
2804
2805 if (kret != KERN_SUCCESS) {
2806 panic("brelse: Failed to create UPL");
2807 }
2808 #if UPL_DEBUG
2809 upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 5);
2810 #endif /* UPL_DEBUG */
2811 }
2812 } else {
2813 if (bp->b_datap) {
2814 kret = ubc_upl_unmap(upl);
2815
2816 if (kret != KERN_SUCCESS) {
2817 panic("ubc_upl_unmap failed");
2818 }
2819 bp->b_datap = (uintptr_t)NULL;
2820 }
2821 }
2822 if (upl) {
2823 if (bp->b_flags & (B_ERROR | B_INVAL)) {
2824 if (bp->b_flags & (B_READ | B_INVAL)) {
2825 upl_flags = UPL_ABORT_DUMP_PAGES;
2826 } else {
2827 upl_flags = 0;
2828 }
2829
2830 ubc_upl_abort(upl, upl_flags);
2831 } else {
2832 if (ISSET(bp->b_flags, B_DELWRI | B_WASDIRTY)) {
2833 upl_flags = UPL_COMMIT_SET_DIRTY;
2834 } else {
2835 upl_flags = UPL_COMMIT_CLEAR_DIRTY;
2836 }
2837
2838 ubc_upl_commit_range(upl, 0, bp->b_bufsize, upl_flags |
2839 UPL_COMMIT_INACTIVATE | UPL_COMMIT_FREE_ON_EMPTY);
2840 }
2841 bp->b_upl = NULL;
2842 }
2843 } else {
2844 if ((upl)) {
2845 panic("brelse: UPL set for non VREG; vp=%p", bp->b_vp);
2846 }
2847 }
2848
2849 /*
2850 * If it's locked, don't report an error; try again later.
2851 */
2852 if (ISSET(bp->b_flags, (B_LOCKED | B_ERROR)) == (B_LOCKED | B_ERROR)) {
2853 CLR(bp->b_flags, B_ERROR);
2854 }
2855 /*
2856 * If it's not cacheable, or an error, mark it invalid.
2857 */
2858 if (ISSET(bp->b_flags, (B_NOCACHE | B_ERROR))) {
2859 SET(bp->b_flags, B_INVAL);
2860 }
2861
2862 if ((bp->b_bufsize <= 0) ||
2863 ISSET(bp->b_flags, B_INVAL) ||
2864 (ISSET(bp->b_lflags, BL_WANTDEALLOC) && !ISSET(bp->b_flags, B_DELWRI))) {
2865 boolean_t delayed_buf_free_meta_store = FALSE;
2866
2867 /*
2868 * If it's invalid or empty, dissociate it from its vnode,
2869 * release its storage if B_META, and
2870 * clean it up a bit and put it on the EMPTY queue
2871 */
2872 if (ISSET(bp->b_flags, B_DELWRI)) {
2873 OSAddAtomicLong(-1, &nbdwrite);
2874 }
2875
2876 if (ISSET(bp->b_flags, B_META)) {
2877 if (bp->b_shadow_ref) {
2878 delayed_buf_free_meta_store = TRUE;
2879 } else {
2880 buf_free_meta_store(bp);
2881 }
2882 }
2883 /*
2884 * nuke any credentials we were holding
2885 */
2886 buf_release_credentials(bp);
2887
2888 lck_mtx_lock_spin(&buf_mtx);
2889
2890 if (bp->b_shadow_ref) {
2891 SET(bp->b_lflags, BL_WAITSHADOW);
2892
2893 lck_mtx_unlock(&buf_mtx);
2894
2895 return;
2896 }
2897 if (delayed_buf_free_meta_store == TRUE) {
2898 lck_mtx_unlock(&buf_mtx);
2899 finish_shadow_master:
2900 buf_free_meta_store(bp);
2901
2902 lck_mtx_lock_spin(&buf_mtx);
2903 }
2904 CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
2905
2906 if (bp->b_vp) {
2907 brelvp_locked(bp);
2908 }
2909
2910 bremhash(bp);
2911 BLISTNONE(bp);
2912 binshash(bp, &invalhash);
2913
2914 bp->b_whichq = BQ_EMPTY;
2915 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
2916 } else {
2917 /*
2918 * It has valid data. Put it on the end of the appropriate
2919 * queue, so that it'll stick around for as long as possible.
2920 */
2921 if (ISSET(bp->b_flags, B_LOCKED)) {
2922 whichq = BQ_LOCKED; /* locked in core */
2923 } else if (ISSET(bp->b_flags, B_META)) {
2924 whichq = BQ_META; /* meta-data */
2925 } else if (ISSET(bp->b_flags, B_AGE)) {
2926 whichq = BQ_AGE; /* stale but valid data */
2927 } else {
2928 whichq = BQ_LRU; /* valid data */
2929 }
2930 bufq = &bufqueues[whichq];
2931
2932 bp->b_timestamp = buf_timestamp();
2933
2934 lck_mtx_lock_spin(&buf_mtx);
2935
2936 /*
2937 * the buf_brelse_shadow routine doesn't take 'ownership'
2938 * of the parent buf_t... it updates state that is protected by
2939 * the buf_mtx, and checks for BL_BUSY to determine whether to
2940 * put the buf_t back on a free list. b_shadow_ref is protected
2941 * by the lock, and since we have not yet cleared B_BUSY, we need
2942 * to check it while holding the lock to ensure that one of us
2943 * puts this buf_t back on a free list when it is safe to do so
2944 */
2945 if (bp->b_shadow_ref == 0) {
2946 CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE));
2947 bp->b_whichq = whichq;
2948 binstailfree(bp, bufq, whichq);
2949 } else {
2950 /*
2951 * there are still cloned buf_t's pointing
2952 * at this guy... need to keep it off the
2953 * freelists until a buf_brelse is done on
2954 * the last clone
2955 */
2956 CLR(bp->b_flags, (B_ASYNC | B_NOCACHE));
2957 }
2958 }
2959 if (needbuffer) {
2960 /*
2961 * needbuffer is a global
2962 * we're currently using buf_mtx to protect it
2963 * delay doing the actual wakeup until after
2964 * we drop buf_mtx
2965 */
2966 needbuffer = 0;
2967 need_wakeup = 1;
2968 }
2969 if (ISSET(bp->b_lflags, BL_WANTED)) {
2970 /*
2971 * delay the actual wakeup until after we
2972 * clear BL_BUSY and we've dropped buf_mtx
2973 */
2974 need_bp_wakeup = 1;
2975 }
2976 /*
2977 * Unlock the buffer.
2978 */
2979 CLR(bp->b_lflags, (BL_BUSY | BL_WANTED));
2980 buf_busycount--;
2981
2982 lck_mtx_unlock(&buf_mtx);
2983
2984 if (need_wakeup) {
2985 /*
2986 * Wake up any processes waiting for any buffer to become free.
2987 */
2988 wakeup(&needbuffer);
2989 }
2990 if (need_bp_wakeup) {
2991 /*
2992 * Wake up any processes waiting for _this_ buffer to become free.
2993 */
2994 wakeup(bp);
2995 }
2996 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 388)) | DBG_FUNC_END,
2997 bp, bp->b_datap, bp->b_flags, 0, 0);
2998 }
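/*
 * Descriptive summary of the placement policy above: invalid or empty
 * buffers are dissociated from their vnode and parked on BQ_EMPTY;
 * otherwise B_LOCKED -> BQ_LOCKED, B_META -> BQ_META, B_AGE -> BQ_AGE,
 * and everything else -> BQ_LRU.  A buffer with b_shadow_ref != 0 stays
 * off the freelists until the last clone is released.
 */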
2999
3000 /*
3001 * Determine if a block is in the cache.
3002 * Just look on what would be its hash chain. If it's there, return
3003 * a pointer to it, unless it's marked invalid. If it's marked invalid,
3004 * we normally don't return the buffer, unless the caller explicitly
3005 * wants us to.
3006 */
3007 static boolean_t
3008 incore(vnode_t vp, daddr64_t blkno)
3009 {
3010 boolean_t retval;
3011 struct bufhashhdr *dp;
3012
3013 dp = BUFHASH(vp, blkno);
3014
3015 lck_mtx_lock_spin(&buf_mtx);
3016
3017 if (incore_locked(vp, blkno, dp)) {
3018 retval = TRUE;
3019 } else {
3020 retval = FALSE;
3021 }
3022 lck_mtx_unlock(&buf_mtx);
3023
3024 return retval;
3025 }
3026
3027
3028 static buf_t
3029 incore_locked(vnode_t vp, daddr64_t blkno, struct bufhashhdr *dp)
3030 {
3031 struct buf *bp;
3032
3033 /* Search hash chain */
3034 for (bp = dp->lh_first; bp != NULL; bp = bp->b_hash.le_next) {
3035 if (bp->b_lblkno == blkno && bp->b_vp == vp &&
3036 !ISSET(bp->b_flags, B_INVAL)) {
3037 return bp;
3038 }
3039 }
3040 return NULL;
3041 }
3042
3043
3044 void
3045 buf_wait_for_shadow_io(vnode_t vp, daddr64_t blkno)
3046 {
3047 buf_t bp;
3048 struct bufhashhdr *dp;
3049
3050 dp = BUFHASH(vp, blkno);
3051
3052 lck_mtx_lock_spin(&buf_mtx);
3053
3054 for (;;) {
3055 if ((bp = incore_locked(vp, blkno, dp)) == NULL) {
3056 break;
3057 }
3058
3059 if (bp->b_shadow_ref == 0) {
3060 break;
3061 }
3062
3063 SET(bp->b_lflags, BL_WANTED_REF);
3064
3065 (void) msleep(bp, &buf_mtx, PSPIN | (PRIBIO + 1), "buf_wait_for_shadow", NULL);
3066 }
3067 lck_mtx_unlock(&buf_mtx);
3068 }
3069
3070 /* XXX FIXME -- Update the comment to reflect the UBC changes (please) -- */
3071 /*
3072 * Get a block of requested size that is associated with
3073 * a given vnode and block offset. If it is found in the
3074 * block cache, mark it as having been found, make it busy
3075 * and return it. Otherwise, return an empty block of the
3076 * correct size. It is up to the caller to ensure that the
3077 * cached blocks are of the correct size.
3078 */
3079 buf_t
3080 buf_getblk(vnode_t vp, daddr64_t blkno, int size, int slpflag, int slptimeo, int operation)
3081 {
3082 buf_t bp;
3083 int err;
3084 upl_t upl;
3085 upl_page_info_t *pl;
3086 kern_return_t kret;
3087 int ret_only_valid;
3088 struct timespec ts;
3089 int upl_flags;
3090 struct bufhashhdr *dp;
3091
3092 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_START,
3093 (uintptr_t)(blkno * PAGE_SIZE), size, operation, 0, 0);
3094
3095 ret_only_valid = operation & BLK_ONLYVALID;
3096 operation &= ~BLK_ONLYVALID;
3097 dp = BUFHASH(vp, blkno);
3098 start:
3099 lck_mtx_lock_spin(&buf_mtx);
3100
3101 if ((bp = incore_locked(vp, blkno, dp))) {
3102 /*
3103 * Found in the Buffer Cache
3104 */
3105 if (ISSET(bp->b_lflags, BL_BUSY)) {
3106 /*
3107 * but is busy
3108 */
3109 switch (operation) {
3110 case BLK_READ:
3111 case BLK_WRITE:
3112 case BLK_META:
3113 SET(bp->b_lflags, BL_WANTED);
3114 bufstats.bufs_busyincore++;
3115
3116 /*
3117 * don't retake the mutex after being awakened...
3118 * the time out is in msecs
3119 */
3120 ts.tv_sec = (slptimeo / 1000);
3121 ts.tv_nsec = (slptimeo % 1000) * 10 * NSEC_PER_USEC * 1000;
3122
3123 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 396)) | DBG_FUNC_NONE,
3124 (uintptr_t)blkno, size, operation, 0, 0);
3125
3126 err = msleep(bp, &buf_mtx, slpflag | PDROP | (PRIBIO + 1), "buf_getblk", &ts);
3127
3128 /*
3129 * Callers who call with PCATCH or timeout are
3130 * willing to deal with the NULL pointer
3131 */
3132 if (err && ((slpflag & PCATCH) || ((err == EWOULDBLOCK) && slptimeo))) {
3133 return NULL;
3134 }
3135 goto start;
3136 /*NOTREACHED*/
3137
3138 default:
3139 /*
3140 * unknown operation requested
3141 */
3142 panic("getblk: paging or unknown operation for incore busy buffer - %x", operation);
3143 /*NOTREACHED*/
3144 break;
3145 }
3146 } else {
3147 int clear_bdone;
3148
3149 /*
3150 * buffer in core and not busy
3151 */
3152 SET(bp->b_lflags, BL_BUSY);
3153 SET(bp->b_flags, B_CACHE);
3154 buf_busycount++;
3155
3156 bremfree_locked(bp);
3157 bufstats.bufs_incore++;
3158
3159 lck_mtx_unlock(&buf_mtx);
3160 #ifdef JOE_DEBUG
3161 bp->b_owner = current_thread();
3162 bp->b_tag = 1;
3163 #endif
3164 if ((bp->b_upl)) {
3165 panic("buffer has UPL, but not marked BUSY: %p", bp);
3166 }
3167
3168 clear_bdone = FALSE;
3169 if (!ret_only_valid) {
3170 /*
3171 * If the number of bytes that are valid is going
3172 * to increase (even if we end up not doing a
3173 * reallocation through allocbuf) we have to read
3174 * the new size first.
3175 *
3176 * This is required in cases where we are doing a
3177 * read-modify-write of data that is already valid on disk;
3178 * but in cases where the data on disk beyond (blkno + b_bcount)
3179 * is invalid, we may end up doing extra I/O.
3180 */
3181 if (operation == BLK_META && bp->b_bcount < (uint32_t)size) {
3182 /*
3183 * Since we are going to read in the whole size first
3184 * we first have to ensure that any pending delayed write
3185 * is flushed to disk first.
3186 */
3187 if (ISSET(bp->b_flags, B_DELWRI)) {
3188 CLR(bp->b_flags, B_CACHE);
3189 buf_bwrite(bp);
3190 goto start;
3191 }
3192 /*
3193 * clear B_DONE before returning from
3194 * this function so that the caller can
3195 * issue a read for the new size.
3196 */
3197 clear_bdone = TRUE;
3198 }
3199
3200 if (bp->b_bufsize != (uint32_t)size) {
3201 allocbuf(bp, size);
3202 }
3203 }
3204
3205 upl_flags = 0;
3206 switch (operation) {
3207 case BLK_WRITE:
3208 /*
3209 * "write" operation: let the UPL subsystem
3210 * know that we intend to modify the buffer
3211 * cache pages we're gathering.
3212 */
3213 upl_flags |= UPL_WILL_MODIFY;
3214 OS_FALLTHROUGH;
3215 case BLK_READ:
3216 upl_flags |= UPL_PRECIOUS;
3217 if (UBCINFOEXISTS(bp->b_vp) && bp->b_bufsize) {
3218 kret = ubc_create_upl_kernel(vp,
3219 ubc_blktooff(vp, bp->b_lblkno),
3220 bp->b_bufsize,
3221 &upl,
3222 &pl,
3223 upl_flags,
3224 VM_KERN_MEMORY_FILE);
3225 if (kret != KERN_SUCCESS) {
3226 panic("Failed to create UPL");
3227 }
3228
3229 bp->b_upl = upl;
3230
3231 if (upl_valid_page(pl, 0)) {
3232 if (upl_dirty_page(pl, 0)) {
3233 SET(bp->b_flags, B_WASDIRTY);
3234 } else {
3235 CLR(bp->b_flags, B_WASDIRTY);
3236 }
3237 } else {
3238 CLR(bp->b_flags, (B_DONE | B_CACHE | B_WASDIRTY | B_DELWRI));
3239 }
3240
3241 kret = ubc_upl_map(upl, (vm_offset_t*)&(bp->b_datap));
3242
3243 if (kret != KERN_SUCCESS) {
3244 panic("getblk: ubc_upl_map() failed with (%d)", kret);
3245 }
3246 }
3247 break;
3248
3249 case BLK_META:
3250 /*
3251 * VM is not involved in I/O for the meta data;
3252 * the buffer already has valid data
3253 */
3254 break;
3255
3256 default:
3257 panic("getblk: paging or unknown operation for incore buffer- %d", operation);
3258 /*NOTREACHED*/
3259 break;
3260 }
3261
3262 if (clear_bdone) {
3263 CLR(bp->b_flags, B_DONE);
3264 }
3265 }
3266 } else { /* not incore() */
3267 int queue = BQ_EMPTY; /* Start with no preference */
3268
3269 if (ret_only_valid) {
3270 lck_mtx_unlock(&buf_mtx);
3271 return NULL;
3272 }
3273 if ((vnode_isreg(vp) == 0) || (UBCINFOEXISTS(vp) == 0) /*|| (vnode_issystem(vp) == 1)*/) {
3274 operation = BLK_META;
3275 }
3276
3277 if ((bp = getnewbuf(slpflag, slptimeo, &queue)) == NULL) {
3278 goto start;
3279 }
3280
3281 /*
3282 * getnewbuf may block for a number of different reasons...
3283 * if it does, it's then possible for someone else to
3284 * create a buffer for the same block and insert it into
3285 * the hash... if we see it incore at this point we dump
3286 * the buffer we were working on and start over
3287 */
3288 if (incore_locked(vp, blkno, dp)) {
3289 SET(bp->b_flags, B_INVAL);
3290 binshash(bp, &invalhash);
3291
3292 lck_mtx_unlock(&buf_mtx);
3293
3294 buf_brelse(bp);
3295 goto start;
3296 }
3297 /*
3298 * NOTE: YOU CAN NOT BLOCK UNTIL binshash() HAS BEEN
3299 * CALLED! BE CAREFUL.
3300 */
3301
3302 /*
3303 * mark the buffer as B_META if indicated
3304 * so that when the buffer is released it will go to the META queue
3305 */
3306 if (operation == BLK_META) {
3307 SET(bp->b_flags, B_META);
3308 }
3309
3310 bp->b_blkno = bp->b_lblkno = blkno;
3311 bp->b_lblksize = 0; /* Should be set by caller */
3312 bp->b_vp = vp;
3313
3314 /*
3315 * Insert in the hash so that incore() can find it
3316 */
3317 binshash(bp, BUFHASH(vp, blkno));
3318
3319 bgetvp_locked(vp, bp);
3320
3321 lck_mtx_unlock(&buf_mtx);
3322
3323 allocbuf(bp, size);
3324
3325 upl_flags = 0;
3326 switch (operation) {
3327 case BLK_META:
3328 /*
3329 * buffer data is invalid...
3330 *
3331 * I don't want to have to retake buf_mtx,
3332 * so the miss and vmhits counters are done
3333 * with Atomic updates... all other counters
3334 * in bufstats are protected with either
3335 * buf_mtx or iobuffer_mtxp
3336 */
3337 OSAddAtomicLong(1, &bufstats.bufs_miss);
3338 break;
3339
3340 case BLK_WRITE:
3341 /*
3342 * "write" operation: let the UPL subsystem know
3343 * that we intend to modify the buffer cache pages
3344 * we're gathering.
3345 */
3346 upl_flags |= UPL_WILL_MODIFY;
3347 OS_FALLTHROUGH;
3348 case BLK_READ:
3349 { off_t f_offset;
3350 size_t contig_bytes;
3351 int bmap_flags;
3352
3353 #if DEVELOPMENT || DEBUG
3354 /*
3355 * Apple-implemented file systems use UBC exclusively; they should
3356 * not call in here.
3357 */
3358 const char* excldfs[] = {"hfs", "afpfs", "smbfs", "acfs",
3359 "exfat", "msdos", "webdav", NULL};
3360
3361 for (int i = 0; excldfs[i] != NULL; i++) {
3362 if (vp->v_mount &&
3363 !strcmp(vp->v_mount->mnt_vfsstat.f_fstypename,
3364 excldfs[i])) {
3365 panic("%s %s calls buf_getblk",
3366 excldfs[i],
3367 operation == BLK_READ ? "BLK_READ" : "BLK_WRITE");
3368 }
3369 }
3370 #endif
3371
3372 if ((bp->b_upl)) {
3373 panic("bp already has UPL: %p", bp);
3374 }
3375
3376 f_offset = ubc_blktooff(vp, blkno);
3377
3378 upl_flags |= UPL_PRECIOUS;
3379 kret = ubc_create_upl_kernel(vp,
3380 f_offset,
3381 bp->b_bufsize,
3382 &upl,
3383 &pl,
3384 upl_flags,
3385 VM_KERN_MEMORY_FILE);
3386
3387 if (kret != KERN_SUCCESS) {
3388 panic("Failed to create UPL");
3389 }
3390 #if UPL_DEBUG
3391 upl_ubc_alias_set(upl, (uintptr_t) bp, (uintptr_t) 4);
3392 #endif /* UPL_DEBUG */
3393 bp->b_upl = upl;
3394
3395 if (upl_valid_page(pl, 0)) {
3396 if (operation == BLK_READ) {
3397 bmap_flags = VNODE_READ;
3398 } else {
3399 bmap_flags = VNODE_WRITE;
3400 }
3401
3402 SET(bp->b_flags, B_CACHE | B_DONE);
3403
3404 OSAddAtomicLong(1, &bufstats.bufs_vmhits);
3405
3406 bp->b_validoff = 0;
3407 bp->b_dirtyoff = 0;
3408
3409 if (upl_dirty_page(pl, 0)) {
3410 /* page is dirty */
3411 SET(bp->b_flags, B_WASDIRTY);
3412
3413 bp->b_validend = bp->b_bcount;
3414 bp->b_dirtyend = bp->b_bcount;
3415 } else {
3416 /* page is clean */
3417 bp->b_validend = bp->b_bcount;
3418 bp->b_dirtyend = 0;
3419 }
3420 /*
3421 * try to recreate the physical block number associated with
3422 * this buffer...
3423 */
3424 if (VNOP_BLOCKMAP(vp, f_offset, bp->b_bcount, &bp->b_blkno, &contig_bytes, NULL, bmap_flags, NULL)) {
3425 panic("getblk: VNOP_BLOCKMAP failed");
3426 }
3427 /*
3428 * if the extent represented by this buffer
3429 * is not completely physically contiguous on
3430 * disk, then we can't cache the physical mapping
3431 * in the buffer header
3432 */
3433 if ((uint32_t)contig_bytes < bp->b_bcount) {
3434 bp->b_blkno = bp->b_lblkno;
3435 }
3436 } else {
3437 OSAddAtomicLong(1, &bufstats.bufs_miss);
3438 }
3439 kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap));
3440
3441 if (kret != KERN_SUCCESS) {
3442 panic("getblk: ubc_upl_map() failed with (%d)", kret);
3443 }
3444 break;} // end BLK_READ
3445 default:
3446 panic("getblk: paging or unknown operation - %x", operation);
3447 /*NOTREACHED*/
3448 break;
3449 } // end switch
3450 } //end buf_t !incore
3451
3452 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 386)) | DBG_FUNC_END,
3453 bp, bp->b_datap, bp->b_flags, 3, 0);
3454
3455 #ifdef JOE_DEBUG
3456 (void) OSBacktrace(&bp->b_stackgetblk[0], 6);
3457 #endif
3458 return bp;
3459 }
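/*
 * Usage sketch (comment only): BLK_ONLYVALID turns buf_getblk() into a
 * cache probe that never creates a new buffer, so a caller can fall back
 * to an explicit read when it returns NULL.
 *
 *	bp = buf_getblk(vp, blkno, blksize, 0, 0, BLK_META | BLK_ONLYVALID);
 *	if (bp == NULL) {
 *		error = buf_meta_bread(vp, blkno, blksize, cred, &bp);
 *	}
 */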
3460
3461 /*
3462 * Get an empty, disassociated buffer of given size.
3463 */
3464 buf_t
3465 buf_geteblk(int size)
3466 {
3467 buf_t bp = NULL;
3468 int queue = BQ_EMPTY;
3469
3470 do {
3471 lck_mtx_lock_spin(&buf_mtx);
3472
3473 bp = getnewbuf(0, 0, &queue);
3474 } while (bp == NULL);
3475
3476 SET(bp->b_flags, (B_META | B_INVAL));
3477
3478 #if DIAGNOSTIC
3479 assert(queue == BQ_EMPTY);
3480 #endif /* DIAGNOSTIC */
3481 /* XXX need to implement logic to deal with other queues */
3482
3483 binshash(bp, &invalhash);
3484 bufstats.bufs_eblk++;
3485
3486 lck_mtx_unlock(&buf_mtx);
3487
3488 allocbuf(bp, size);
3489
3490 return bp;
3491 }
3492
3493 uint32_t
3494 buf_redundancy_flags(buf_t bp)
3495 {
3496 return bp->b_redundancy_flags;
3497 }
3498
3499 void
3500 buf_set_redundancy_flags(buf_t bp, uint32_t flags)
3501 {
3502 SET(bp->b_redundancy_flags, flags);
3503 }
3504
3505 void
3506 buf_clear_redundancy_flags(buf_t bp, uint32_t flags)
3507 {
3508 CLR(bp->b_redundancy_flags, flags);
3509 }
3510
3511
3512
3513 static void *
3514 recycle_buf_from_pool(int nsize)
3515 {
3516 buf_t bp;
3517 void *ptr = NULL;
3518
3519 lck_mtx_lock_spin(&buf_mtx);
3520
3521 TAILQ_FOREACH(bp, &bufqueues[BQ_META], b_freelist) {
3522 if (ISSET(bp->b_flags, B_DELWRI) || bp->b_bufsize != (uint32_t)nsize) {
3523 continue;
3524 }
3525 ptr = (void *)bp->b_datap;
3526 bp->b_bufsize = 0;
3527
3528 bcleanbuf(bp, TRUE);
3529 break;
3530 }
3531 lck_mtx_unlock(&buf_mtx);
3532
3533 return ptr;
3534 }
3535
3536
3537
3538 int zalloc_nopagewait_failed = 0;
3539 int recycle_buf_failed = 0;
3540
3541 static void *
3542 grab_memory_for_meta_buf(int nsize)
3543 {
3544 void *ptr;
3545 boolean_t was_vmpriv;
3546
3547
3548 /*
3549 * make sure we're NOT privileged so that
3550 * if a vm_page_grab is needed, it won't
3551 * block if we're out of free pages... if
3552 * it blocks, then we can't honor the
3553 * nopagewait request
3554 */
3555 was_vmpriv = set_vm_privilege(FALSE);
3556
3557 ptr = kheap_alloc(KHEAP_VFS_BIO, nsize, Z_NOPAGEWAIT);
3558
3559 if (was_vmpriv == TRUE) {
3560 set_vm_privilege(TRUE);
3561 }
3562
3563 if (ptr == NULL) {
3564 zalloc_nopagewait_failed++;
3565
3566 ptr = recycle_buf_from_pool(nsize);
3567
3568 if (ptr == NULL) {
3569 recycle_buf_failed++;
3570
3571 if (was_vmpriv == FALSE) {
3572 set_vm_privilege(TRUE);
3573 }
3574
3575 ptr = kheap_alloc(KHEAP_VFS_BIO, nsize, Z_WAITOK);
3576
3577 if (was_vmpriv == FALSE) {
3578 set_vm_privilege(FALSE);
3579 }
3580 }
3581 }
3582 return ptr;
3583 }
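/*
 * Descriptive summary of the fallback order above: (1) kheap_alloc()
 * with Z_NOPAGEWAIT while deliberately unprivileged, (2) recycle the
 * data region of an idle, same-sized BQ_META buffer, (3) as a last
 * resort, a kheap_alloc() that is allowed to wait for pages, run with
 * vm privilege if we didn't already have it.
 */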
3584
3585 /*
3586 * With UBC, there is no need to expand / shrink the file data
3587 * buffer. The VM uses the same pages, hence no waste.
3588 * All the file data buffers can have one size.
3589 * In fact expand / shrink would be an expensive operation.
3590 *
3591 * Only exception to this is meta-data buffers. Most of the
3592 * meta data operations are smaller than PAGE_SIZE. Having the
3593 * meta-data buffers grow and shrink as needed, optimizes use
3594 * of the kernel wired memory.
3595 */
3596
3597 int
3598 allocbuf(buf_t bp, int size)
3599 {
3600 vm_size_t desired_size;
3601
3602 desired_size = roundup(size, CLBYTES);
3603
3604 if (desired_size < PAGE_SIZE) {
3605 desired_size = PAGE_SIZE;
3606 }
3607 if (desired_size > MAXBSIZE) {
3608 panic("allocbuf: buffer larger than MAXBSIZE requested");
3609 }
3610
3611 if (ISSET(bp->b_flags, B_META)) {
3612 int nsize = roundup(size, MINMETA);
3613
3614 if (bp->b_datap) {
3615 void *elem = (void *)bp->b_datap;
3616
3617 if (ISSET(bp->b_flags, B_ZALLOC)) {
3618 if (bp->b_bufsize < (uint32_t)nsize) {
3619 /* reallocate to a bigger size */
3620
3621 if (nsize <= MAXMETA) {
3622 desired_size = nsize;
3623
3624 /* b_datap not really a ptr */
3625 *(void **)(&bp->b_datap) = grab_memory_for_meta_buf(nsize);
3626 } else {
3627 bp->b_datap = (uintptr_t)NULL;
3628 kmem_alloc(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size,
3629 KMA_KOBJECT | KMA_DATA | KMA_NOFAIL,
3630 VM_KERN_MEMORY_FILE);
3631 CLR(bp->b_flags, B_ZALLOC);
3632 }
3633 bcopy(elem, (caddr_t)bp->b_datap, bp->b_bufsize);
3634 kheap_free(KHEAP_VFS_BIO, elem, bp->b_bufsize);
3635 } else {
3636 desired_size = bp->b_bufsize;
3637 }
3638 } else {
3639 if ((vm_size_t)bp->b_bufsize < desired_size) {
3640 /* reallocate to a bigger size */
3641 bp->b_datap = (uintptr_t)NULL;
3642 kmem_alloc(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size,
3643 KMA_KOBJECT | KMA_DATA | KMA_NOFAIL,
3644 VM_KERN_MEMORY_FILE);
3645 bcopy(elem, (caddr_t)bp->b_datap, bp->b_bufsize);
3646 kmem_free(kernel_map, (vm_offset_t)elem, bp->b_bufsize);
3647 } else {
3648 desired_size = bp->b_bufsize;
3649 }
3650 }
3651 } else {
3652 /* new allocation */
3653 if (nsize <= MAXMETA) {
3654 desired_size = nsize;
3655
3656 /* b_datap not really a ptr */
3657 *(void **)(&bp->b_datap) = grab_memory_for_meta_buf(nsize);
3658 SET(bp->b_flags, B_ZALLOC);
3659 } else {
3660 kmem_alloc(kernel_map, (vm_offset_t *)&bp->b_datap, desired_size,
3661 KMA_KOBJECT | KMA_DATA | KMA_NOFAIL,
3662 VM_KERN_MEMORY_FILE);
3663 }
3664 }
3665 }
3666 bp->b_bufsize = (uint32_t)desired_size;
3667 bp->b_bcount = size;
3668
3669 return 0;
3670 }
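/*
 * Worked example of the sizing rules above (comment only; assumes
 * MINMETA 512, MAXMETA 16384 and CLBYTES equal to a 4K page):
 *
 *	B_META, size 3000   -> nsize = 3072, kheap (B_ZALLOC) allocation
 *	B_META, size 20000  -> nsize > MAXMETA, kmem_alloc of 20480 bytes
 *	file data, any size -> no allocation here; b_bufsize is rounded up
 *	                       (at least PAGE_SIZE) and the pages come from UBC
 */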
3671
3672 /*
3673 * Get a new buffer from one of the free lists.
3674 *
3675 * The requested queue is passed in. The queue from which the buffer was
3676 * taken is returned. Out of range queue requests get BQ_EMPTY. A request for
3677 * BQUEUE means no preference. Use heuristics in that case.
3678 * The heuristic is as follows:
3679 * Try BQ_AGE, BQ_LRU, BQ_EMPTY, BQ_META in that order.
3680 * If none are available, block till one is made available.
3681 * If buffers are available on both BQ_AGE and BQ_LRU, check the timestamps.
3682 * Pick the most stale buffer.
3683 * If the found buffer was marked delayed write, start the async write
3684 * and restart the search.
3685 * Initialize the fields and disassociate the buffer from the vnode.
3686 * Remove the buffer from the hash. Return the buffer and the queue
3687 * on which it was found.
3688 *
3689 * buf_mtx is held upon entry
3690 * returns with buf_mtx locked if new buf available
3691 * returns with buf_mtx UNlocked if new buf NOT available
3692 */
3693
3694 static buf_t
3695 getnewbuf(int slpflag, int slptimeo, int * queue)
3696 {
3697 buf_t bp;
3698 buf_t lru_bp;
3699 buf_t age_bp;
3700 buf_t meta_bp;
3701 int age_time, lru_time, bp_time, meta_time;
3702 int req = *queue; /* save it for restarts */
3703 struct timespec ts;
3704
3705 start:
3706 /*
3707 * invalid request gets empty queue
3708 */
3709 if ((*queue >= BQUEUES) || (*queue < 0)
3710 || (*queue == BQ_LAUNDRY) || (*queue == BQ_LOCKED)) {
3711 *queue = BQ_EMPTY;
3712 }
3713
3714
3715 if (*queue == BQ_EMPTY && (bp = bufqueues[*queue].tqh_first)) {
3716 goto found;
3717 }
3718
3719 /*
3720 * need to grow number of bufs, add another one rather than recycling
3721 */
3722 if (nbuf_headers < max_nbuf_headers) {
3723 /*
3724 * Increment count now as lock
3725 * is dropped for allocation.
3726 * That avoids over commits
3727 */
3728 nbuf_headers++;
3729 goto add_newbufs;
3730 }
3731 /* Try for the requested queue first */
3732 bp = bufqueues[*queue].tqh_first;
3733 if (bp) {
3734 goto found;
3735 }
3736
3737 /* Unable to use requested queue */
3738 age_bp = bufqueues[BQ_AGE].tqh_first;
3739 lru_bp = bufqueues[BQ_LRU].tqh_first;
3740 meta_bp = bufqueues[BQ_META].tqh_first;
3741
3742 if (!age_bp && !lru_bp && !meta_bp) {
3743 /*
3744 * Unavailable on AGE or LRU or META queues
3745 * Try the empty list first
3746 */
3747 bp = bufqueues[BQ_EMPTY].tqh_first;
3748 if (bp) {
3749 *queue = BQ_EMPTY;
3750 goto found;
3751 }
3752 /*
3753 * We have seen that this is hard to trigger.
3754 * This is an overcommit of nbufs but needed
3755 * in some scenarios with disk images.
3756 */
3757
3758 add_newbufs:
3759 lck_mtx_unlock(&buf_mtx);
3760
3761 /* Create a new temporary buffer header */
3762 bp = zalloc_flags(buf_hdr_zone, Z_WAITOK | Z_NOFAIL);
3763 bufhdrinit(bp);
3764 bp->b_whichq = BQ_EMPTY;
3765 bp->b_timestamp = buf_timestamp();
3766 BLISTNONE(bp);
3767 SET(bp->b_flags, B_HDRALLOC);
3768 *queue = BQ_EMPTY;
3769 lck_mtx_lock_spin(&buf_mtx);
3770
3771 if (bp) {
3772 binshash(bp, &invalhash);
3773 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
3774 buf_hdr_count++;
3775 goto found;
3776 }
3777 /* subtract already accounted bufcount */
3778 nbuf_headers--;
3779
3780 bufstats.bufs_sleeps++;
3781
3782 /* wait for a free buffer of any kind */
3783 needbuffer = 1;
3784 /* hz value is 100 */
3785 ts.tv_sec = (slptimeo / 1000);
3786 /* the hz value is 100; which leads to 10ms */
3787 ts.tv_nsec = (slptimeo % 1000) * NSEC_PER_USEC * 1000 * 10;
3788
3789 msleep(&needbuffer, &buf_mtx, slpflag | PDROP | (PRIBIO + 1), "getnewbuf", &ts);
3790 return NULL;
3791 }
3792
3793 /* Buffer available either on AGE or LRU or META */
3794 bp = NULL;
3795 *queue = -1;
3796
3797 /* Buffer available either on AGE or LRU */
3798 if (!age_bp) {
3799 bp = lru_bp;
3800 *queue = BQ_LRU;
3801 } else if (!lru_bp) {
3802 bp = age_bp;
3803 *queue = BQ_AGE;
3804 } else { /* buffer available on both AGE and LRU */
3805 int t = buf_timestamp();
3806
3807 age_time = t - age_bp->b_timestamp;
3808 lru_time = t - lru_bp->b_timestamp;
3809 if ((age_time < 0) || (lru_time < 0)) { /* time set backwards */
3810 bp = age_bp;
3811 *queue = BQ_AGE;
3812 /*
3813 * we should probably re-timestamp everything in the
3814 * queues at this point with the current time
3815 */
3816 } else {
3817 if ((lru_time >= lru_is_stale) && (age_time < age_is_stale)) {
3818 bp = lru_bp;
3819 *queue = BQ_LRU;
3820 } else {
3821 bp = age_bp;
3822 *queue = BQ_AGE;
3823 }
3824 }
3825 }
3826
3827 if (!bp) { /* Neither on AGE nor on LRU */
3828 bp = meta_bp;
3829 *queue = BQ_META;
3830 } else if (meta_bp) {
3831 int t = buf_timestamp();
3832
3833 bp_time = t - bp->b_timestamp;
3834 meta_time = t - meta_bp->b_timestamp;
3835
3836 if (!(bp_time < 0) && !(meta_time < 0)) {
3837 /* time not set backwards */
3838 int bp_is_stale;
3839 bp_is_stale = (*queue == BQ_LRU) ?
3840 lru_is_stale : age_is_stale;
3841
3842 if ((meta_time >= meta_is_stale) &&
3843 (bp_time < bp_is_stale)) {
3844 bp = meta_bp;
3845 *queue = BQ_META;
3846 }
3847 }
3848 }
3849 found:
3850 if (ISSET(bp->b_flags, B_LOCKED) || ISSET(bp->b_lflags, BL_BUSY)) {
3851 panic("getnewbuf: bp @ %p is LOCKED or BUSY! (flags 0x%x)", bp, bp->b_flags);
3852 }
3853
3854 /* Clean it */
3855 if (bcleanbuf(bp, FALSE)) {
3856 /*
3857 * moved to the laundry thread, buffer not ready
3858 */
3859 *queue = req;
3860 goto start;
3861 }
3862 return bp;
3863 }
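/*
 * Worked example of the victim selection above (comment only; the
 * staleness thresholds are illustrative, not the tuned defaults):
 * with lru_is_stale = 120s and age_is_stale = 60s,
 *
 *	lru_time = 130s, age_time = 10s -> the LRU head is stale and the
 *	    AGE head is fresh, so the LRU buffer is taken;
 *	any other combination           -> the AGE buffer is taken.
 *
 * A BQ_META candidate then replaces that pick only when the meta buffer
 * is stale and the AGE/LRU pick is not (by its own threshold).
 */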
3864
3865
3866 /*
3867 * Clean a buffer.
3868 * Returns 0 if buffer is ready to use,
3869 * Returns 1 if issued a buf_bawrite() to indicate
3870 * that the buffer is not ready.
3871 *
3872 * buf_mtx is held upon entry
3873 * returns with buf_mtx locked
3874 */
3875 int
3876 bcleanbuf(buf_t bp, boolean_t discard)
3877 {
3878 /* Remove from the queue */
3879 bremfree_locked(bp);
3880
3881 #ifdef JOE_DEBUG
3882 bp->b_owner = current_thread();
3883 bp->b_tag = 2;
3884 #endif
3885 /*
3886 * If buffer was a delayed write, start the IO by queuing
3887 * it on the LAUNDRY queue, and return 1
3888 */
3889 if (ISSET(bp->b_flags, B_DELWRI)) {
3890 if (discard) {
3891 SET(bp->b_lflags, BL_WANTDEALLOC);
3892 }
3893
3894 bmovelaundry(bp);
3895
3896 lck_mtx_unlock(&buf_mtx);
3897
3898 wakeup(&bufqueues[BQ_LAUNDRY]);
3899 /*
3900 * and give it a chance to run
3901 */
3902 (void)thread_block(THREAD_CONTINUE_NULL);
3903
3904 lck_mtx_lock_spin(&buf_mtx);
3905
3906 return 1;
3907 }
3908 #ifdef JOE_DEBUG
3909 bp->b_owner = current_thread();
3910 bp->b_tag = 8;
3911 #endif
3912 /*
3913 * Buffer is no longer on any free list... we own it
3914 */
3915 SET(bp->b_lflags, BL_BUSY);
3916 buf_busycount++;
3917
3918 bremhash(bp);
3919
3920 /*
3921 * disassociate us from our vnode, if we had one...
3922 */
3923 if (bp->b_vp) {
3924 brelvp_locked(bp);
3925 }
3926
3927 lck_mtx_unlock(&buf_mtx);
3928
3929 BLISTNONE(bp);
3930
3931 if (ISSET(bp->b_flags, B_META)) {
3932 buf_free_meta_store(bp);
3933 }
3934
3935 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
3936
3937 buf_release_credentials(bp);
3938
3939 /* If discarding, just move to the empty queue */
3940 if (discard) {
3941 lck_mtx_lock_spin(&buf_mtx);
3942 CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
3943 bp->b_whichq = BQ_EMPTY;
3944 binshash(bp, &invalhash);
3945 binsheadfree(bp, &bufqueues[BQ_EMPTY], BQ_EMPTY);
3946 CLR(bp->b_lflags, BL_BUSY);
3947 buf_busycount--;
3948 } else {
3949 /* Not discarding: clean up and prepare for reuse */
3950 bp->b_bufsize = 0;
3951 bp->b_datap = (uintptr_t)NULL;
3952 bp->b_upl = (void *)NULL;
3953 bp->b_fsprivate = (void *)NULL;
3954 /*
3955 * preserve the state of whether this buffer
3956 * was allocated on the fly or not...
3957 * the only other flag that should be set at
3958 * this point is BL_BUSY...
3959 */
3960 #ifdef JOE_DEBUG
3961 bp->b_owner = current_thread();
3962 bp->b_tag = 3;
3963 #endif
3964 bp->b_lflags = BL_BUSY;
3965 bp->b_flags = (bp->b_flags & B_HDRALLOC);
3966 bp->b_redundancy_flags = 0;
3967 bp->b_dev = NODEV;
3968 bp->b_blkno = bp->b_lblkno = 0;
3969 bp->b_lblksize = 0;
3970 bp->b_iodone = NULL;
3971 bp->b_error = 0;
3972 bp->b_resid = 0;
3973 bp->b_bcount = 0;
3974 bp->b_dirtyoff = bp->b_dirtyend = 0;
3975 bp->b_validoff = bp->b_validend = 0;
3976 bzero(&bp->b_attr, sizeof(struct bufattr));
3977
3978 lck_mtx_lock_spin(&buf_mtx);
3979 }
3980 return 0;
3981 }
3982
3983
3984
3985 errno_t
3986 buf_invalblkno(vnode_t vp, daddr64_t lblkno, int flags)
3987 {
3988 buf_t bp;
3989 errno_t error;
3990 struct bufhashhdr *dp;
3991
3992 dp = BUFHASH(vp, lblkno);
3993
3994 relook:
3995 lck_mtx_lock_spin(&buf_mtx);
3996
3997 if ((bp = incore_locked(vp, lblkno, dp)) == (struct buf *)0) {
3998 lck_mtx_unlock(&buf_mtx);
3999 return 0;
4000 }
4001 if (ISSET(bp->b_lflags, BL_BUSY)) {
4002 if (!ISSET(flags, BUF_WAIT)) {
4003 lck_mtx_unlock(&buf_mtx);
4004 return EBUSY;
4005 }
4006 SET(bp->b_lflags, BL_WANTED);
4007
4008 error = msleep((caddr_t)bp, &buf_mtx, PDROP | (PRIBIO + 1), "buf_invalblkno", NULL);
4009
4010 if (error) {
4011 return error;
4012 }
4013 goto relook;
4014 }
4015 bremfree_locked(bp);
4016 SET(bp->b_lflags, BL_BUSY);
4017 SET(bp->b_flags, B_INVAL);
4018 buf_busycount++;
4019 #ifdef JOE_DEBUG
4020 bp->b_owner = current_thread();
4021 bp->b_tag = 4;
4022 #endif
4023 lck_mtx_unlock(&buf_mtx);
4024 buf_brelse(bp);
4025
4026 return 0;
4027 }
4028
4029
4030 void
4031 buf_drop(buf_t bp)
4032 {
4033 int need_wakeup = 0;
4034
4035 lck_mtx_lock_spin(&buf_mtx);
4036
4037 if (ISSET(bp->b_lflags, BL_WANTED)) {
4038 /*
4039 * delay the actual wakeup until after we
4040 * clear BL_BUSY and we've dropped buf_mtx
4041 */
4042 need_wakeup = 1;
4043 }
4044 #ifdef JOE_DEBUG
4045 bp->b_owner = current_thread();
4046 bp->b_tag = 9;
4047 #endif
4048 /*
4049 * Unlock the buffer.
4050 */
4051 CLR(bp->b_lflags, (BL_BUSY | BL_WANTED));
4052 buf_busycount--;
4053
4054 lck_mtx_unlock(&buf_mtx);
4055
4056 if (need_wakeup) {
4057 /*
4058 * Wake up any processes waiting for _this_ buffer to become free.
4059 */
4060 wakeup(bp);
4061 }
4062 }
4063
4064
4065 errno_t
4066 buf_acquire(buf_t bp, int flags, int slpflag, int slptimeo)
4067 {
4068 errno_t error;
4069
4070 lck_mtx_lock_spin(&buf_mtx);
4071
4072 error = buf_acquire_locked(bp, flags, slpflag, slptimeo);
4073
4074 lck_mtx_unlock(&buf_mtx);
4075
4076 return error;
4077 }
4078
4079
4080 static errno_t
4081 buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo)
4082 {
4083 errno_t error;
4084 struct timespec ts;
4085
4086 if (ISSET(bp->b_flags, B_LOCKED)) {
4087 if ((flags & BAC_SKIP_LOCKED)) {
4088 return EDEADLK;
4089 }
4090 } else {
4091 if ((flags & BAC_SKIP_NONLOCKED)) {
4092 return EDEADLK;
4093 }
4094 }
4095 if (ISSET(bp->b_lflags, BL_BUSY)) {
4096 /*
4097 * since the lck_mtx_lock may block, the buffer
4098 * may become BUSY, so we need to
4099 * recheck for a NOWAIT request
4100 */
4101 if (flags & BAC_NOWAIT) {
4102 return EBUSY;
4103 }
4104 SET(bp->b_lflags, BL_WANTED);
4105
4106 /* the hz value is 100; which leads to 10ms */
4107 ts.tv_sec = (slptimeo / 100);
4108 ts.tv_nsec = (slptimeo % 100) * 10 * NSEC_PER_USEC * 1000;
4109 error = msleep((caddr_t)bp, &buf_mtx, slpflag | (PRIBIO + 1), "buf_acquire", &ts);
4110
4111 if (error) {
4112 return error;
4113 }
4114 return EAGAIN;
4115 }
4116 if (flags & BAC_REMOVE) {
4117 bremfree_locked(bp);
4118 }
4119 SET(bp->b_lflags, BL_BUSY);
4120 buf_busycount++;
4121
4122 #ifdef JOE_DEBUG
4123 bp->b_owner = current_thread();
4124 bp->b_tag = 5;
4125 #endif
4126 return 0;
4127 }
4128
4129
4130 /*
4131 * Wait for operations on the buffer to complete.
4132 * When they do, extract and return the I/O's error value.
4133 */
4134 errno_t
4135 buf_biowait(buf_t bp)
4136 {
4137 while (!ISSET(bp->b_flags, B_DONE)) {
4138 lck_mtx_lock_spin(&buf_mtx);
4139
4140 if (!ISSET(bp->b_flags, B_DONE)) {
4141 DTRACE_IO1(wait__start, buf_t, bp);
4142 (void) msleep(bp, &buf_mtx, PDROP | (PRIBIO + 1), "buf_biowait", NULL);
4143 DTRACE_IO1(wait__done, buf_t, bp);
4144 } else {
4145 lck_mtx_unlock(&buf_mtx);
4146 }
4147 }
4148 /* check for interruption of I/O (e.g. via NFS), then errors. */
4149 if (ISSET(bp->b_flags, B_EINTR)) {
4150 CLR(bp->b_flags, B_EINTR);
4151 return EINTR;
4152 } else if (ISSET(bp->b_flags, B_ERROR)) {
4153 return bp->b_error ? bp->b_error : EIO;
4154 } else {
4155 return 0;
4156 }
4157 }
4158
4159
4160 /*
4161 * Mark I/O complete on a buffer.
4162 *
4163 * If a callback has been requested, e.g. the pageout
4164 * daemon, do so. Otherwise, awaken waiting processes.
4165 *
4166 * [ Leffler, et al., says on p.247:
4167 * "This routine wakes up the blocked process, frees the buffer
4168 * for an asynchronous write, or, for a request by the pagedaemon
4169 * process, invokes a procedure specified in the buffer structure" ]
4170 *
4171 * In real life, the pagedaemon (or other system processes) wants
4172 * to do async stuff too, and doesn't want the buffer buf_brelse()'d.
4173 * (for swap pager, that puts swap buffers on the free lists (!!!),
4174 * for the vn device, that puts malloc'd buffers on the free lists!)
4175 */
4176
4177 void
4178 buf_biodone(buf_t bp)
4179 {
4180 mount_t mp;
4181 struct bufattr *bap;
4182 struct timeval real_elapsed;
4183 uint64_t real_elapsed_usec = 0;
4184
4185 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_START,
4186 bp, bp->b_datap, bp->b_flags, 0, 0);
4187
4188 /* Record our progress. */
4189 vfs_update_last_completion_time();
4190
4191 if (ISSET(bp->b_flags, B_DONE)) {
4192 panic("biodone already");
4193 }
4194
4195 bap = &bp->b_attr;
4196
4197 if (bp->b_vp && bp->b_vp->v_mount) {
4198 mp = bp->b_vp->v_mount;
4199 } else {
4200 mp = NULL;
4201 }
4202
4203 if (ISSET(bp->b_flags, B_ERROR)) {
4204 if (mp && (MNT_ROOTFS & mp->mnt_flag)) {
4205 dk_error_description_t desc;
4206 bzero(&desc, sizeof(desc));
4207 desc.description = panic_disk_error_description;
4208 desc.description_size = panic_disk_error_description_size;
4209 VNOP_IOCTL(mp->mnt_devvp, DKIOCGETERRORDESCRIPTION, (caddr_t)&desc, 0, vfs_context_kernel());
4210 }
4211 }
4212
4213 if (mp && (bp->b_flags & B_READ) == 0) {
4214 update_last_io_time(mp);
4215 INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_write_size);
4216 } else if (mp) {
4217 INCR_PENDING_IO(-(pending_io_t)buf_count(bp), mp->mnt_pending_read_size);
4218 }
4219
4220 throttle_info_end_io(bp);
4221
4222 if (kdebug_enable) {
4223 int code = DKIO_DONE;
4224 int io_tier = GET_BUFATTR_IO_TIER(bap);
4225
4226 if (bp->b_flags & B_READ) {
4227 code |= DKIO_READ;
4228 }
4229 if (bp->b_flags & B_ASYNC) {
4230 code |= DKIO_ASYNC;
4231 }
4232
4233 if (bp->b_flags & B_META) {
4234 code |= DKIO_META;
4235 } else if (bp->b_flags & B_PAGEIO) {
4236 code |= DKIO_PAGING;
4237 }
4238
4239 if (io_tier != 0) {
4240 code |= DKIO_THROTTLE;
4241 }
4242
4243 code |= ((io_tier << DKIO_TIER_SHIFT) & DKIO_TIER_MASK);
4244
4245 if (bp->b_flags & B_PASSIVE) {
4246 code |= DKIO_PASSIVE;
4247 }
4248
4249 if (bap->ba_flags & BA_NOCACHE) {
4250 code |= DKIO_NOCACHE;
4251 }
4252
4253 if (bap->ba_flags & BA_IO_TIER_UPGRADE) {
4254 code |= DKIO_TIER_UPGRADE;
4255 }
4256
4257 KDBG_RELEASE_NOPROCFILT(FSDBG_CODE(DBG_DKRW, code),
4258 buf_kernel_addrperm_addr(bp),
4259 (uintptr_t)VM_KERNEL_ADDRPERM(bp->b_vp), bp->b_resid,
4260 bp->b_error);
4261 }
4262
4263 microuptime(&real_elapsed);
4264 timevalsub(&real_elapsed, &bp->b_timestamp_tv);
4265 real_elapsed_usec = real_elapsed.tv_sec * USEC_PER_SEC + real_elapsed.tv_usec;
4266 disk_conditioner_delay(bp, 1, bp->b_bcount, real_elapsed_usec);
4267
4268 /*
4269 * I/O was done, so don't believe
4270 * the DIRTY state from VM anymore...
4271 * and we need to reset the THROTTLED/PASSIVE
4272 * indicators
4273 */
4274 CLR(bp->b_flags, (B_WASDIRTY | B_PASSIVE));
4275 CLR(bap->ba_flags, (BA_META | BA_NOCACHE | BA_DELAYIDLESLEEP | BA_IO_TIER_UPGRADE));
4276
4277 SET_BUFATTR_IO_TIER(bap, 0);
4278
4279 DTRACE_IO1(done, buf_t, bp);
4280
4281 if (!ISSET(bp->b_flags, B_READ) && !ISSET(bp->b_flags, B_RAW)) {
		/*
		 * wake up any writers blocked
		 * on throttle or waiting for I/O
		 * to drain
		 */
4287 vnode_writedone(bp->b_vp);
4288 }
4289
4290 if (ISSET(bp->b_flags, (B_CALL | B_FILTER))) { /* if necessary, call out */
4291 void (*iodone_func)(struct buf *, void *) = bp->b_iodone;
4292 void *arg = bp->b_transaction;
4293 int callout = ISSET(bp->b_flags, B_CALL);
4294
4295 if (iodone_func == NULL) {
4296 panic("biodone: bp @ %p has NULL b_iodone!", bp);
4297 }
4298
4299 CLR(bp->b_flags, (B_CALL | B_FILTER)); /* filters and callouts are one-shot */
4300 bp->b_iodone = NULL;
4301 bp->b_transaction = NULL;
4302
4303 if (callout) {
4304 SET(bp->b_flags, B_DONE); /* note that it's done */
4305 }
4306 (*iodone_func)(bp, arg);
4307
4308 if (callout) {
4309 /*
4310 * assumes that the callback function takes
4311 * ownership of the bp and deals with releasing it if necessary
4312 */
4313 goto biodone_done;
4314 }
		/*
		 * in this case the callback function is acting
		 * strictly as a filter... it does not take
		 * ownership of the bp and is expecting us
		 * to finish cleaning up... this is currently used
		 * by the HFS journaling code
		 */
4322 }
4323 if (ISSET(bp->b_flags, B_ASYNC)) { /* if async, release it */
4324 SET(bp->b_flags, B_DONE); /* note that it's done */
4325
4326 buf_brelse(bp);
4327 } else { /* or just wakeup the buffer */
		/*
		 * by taking the mutex, we serialize
		 * the buf owner calling buf_biowait so that we'll
		 * only see it in one of two states...
		 * state 1: B_DONE wasn't set and it's
		 * blocked in msleep
		 * state 2: it's blocked trying to take the
		 * mutex before looking at B_DONE
		 * BL_WANTED is cleared in case anyone else
		 * is blocked waiting for the buffer... note
		 * that we haven't cleared BL_BUSY yet, so if
		 * they do get to run, they're going to re-set
		 * BL_WANTED and go back to sleep
		 */
4342 lck_mtx_lock_spin(&buf_mtx);
4343
4344 CLR(bp->b_lflags, BL_WANTED);
4345 SET(bp->b_flags, B_DONE); /* note that it's done */
4346
4347 lck_mtx_unlock(&buf_mtx);
4348
4349 wakeup(bp);
4350 }
4351 biodone_done:
4352 KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 387)) | DBG_FUNC_END,
4353 (uintptr_t)bp, (uintptr_t)bp->b_datap, bp->b_flags, 0, 0);
4354 }
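
/*
 * Illustrative callout sketch: the B_CALL path above is what fires when a
 * caller has attached an iodone handler, e.g. via the buf_setcallback() KPI.
 * The callout then owns the buffer and must release it itself (typically
 * with buf_brelse()).  The names below are hypothetical.
 *
 *	static void
 *	example_iodone(buf_t bp, void *arg)
 *	{
 *		// inspect buf_error(bp) / buf_resid(bp) here as needed ...
 *		buf_brelse(bp);		// the callout owns the buffer
 *	}
 *
 *	// before issuing the async I/O:
 *	buf_setcallback(bp, example_iodone, NULL);
 */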
4355
4356 /*
4357 * Obfuscate buf pointers.
4358 */
4359 vm_offset_t
buf_kernel_addrperm_addr(void * addr)
4361 {
4362 if ((vm_offset_t)addr == 0) {
4363 return 0;
4364 } else {
4365 return (vm_offset_t)addr + buf_kernel_addrperm;
4366 }
4367 }
4368
4369 /*
4370 * Return a count of buffers on the "locked" queue.
4371 */
4372 int
count_lock_queue(void)
4374 {
4375 buf_t bp;
4376 int n = 0;
4377
4378 lck_mtx_lock_spin(&buf_mtx);
4379
4380 for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
4381 bp = bp->b_freelist.tqe_next) {
4382 n++;
4383 }
4384 lck_mtx_unlock(&buf_mtx);
4385
4386 return n;
4387 }
4388
/*
 * Return a count of 'busy' buffers. Used at the time of shutdown.
 * Note: this is also called from the Mach side in debug context in kdp.c.
 */
4393 uint32_t
count_busy_buffers(void)
4395 {
4396 return buf_busycount + bufstats.bufs_iobufinuse;
4397 }
4398
4399 #if DIAGNOSTIC
4400 /*
4401 * Print out statistics on the current allocation of the buffer pool.
4402 * Can be enabled to print out on every ``sync'' by setting "syncprt"
4403 * in vfs_syscalls.c using sysctl.
4404 */
4405 void
vfs_bufstats()
4407 {
4408 int i, j, count;
4409 struct buf *bp;
4410 struct bqueues *dp;
4411 int counts[MAXBSIZE / CLBYTES + 1];
4412 static char *bname[BQUEUES] =
4413 { "LOCKED", "LRU", "AGE", "EMPTY", "META", "LAUNDRY" };
4414
4415 for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
4416 count = 0;
4417 for (j = 0; j <= MAXBSIZE / CLBYTES; j++) {
4418 counts[j] = 0;
4419 }
4420
4421 lck_mtx_lock(&buf_mtx);
4422
4423 for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
4424 counts[bp->b_bufsize / CLBYTES]++;
4425 count++;
4426 }
4427 lck_mtx_unlock(&buf_mtx);
4428
4429 printf("%s: total-%d", bname[i], count);
4430 for (j = 0; j <= MAXBSIZE / CLBYTES; j++) {
4431 if (counts[j] != 0) {
4432 printf(", %d-%d", j * CLBYTES, counts[j]);
4433 }
4434 }
4435 printf("\n");
4436 }
4437 }
4438 #endif /* DIAGNOSTIC */
4439
4440 #define NRESERVEDIOBUFS 128
4441
4442 #define MNT_VIRTUALDEV_MAX_IOBUFS 128
4443 #define VIRTUALDEV_MAX_IOBUFS ((40*niobuf_headers)/100)
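/*
 * VIRTUALDEV_MAX_IOBUFS caps diskimage I/O buffers at 40% of the global iobuf
 * header pool; for example, if niobuf_headers were 2048, the system-wide
 * limit would be (40 * 2048) / 100 = 819 headers.
 */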
4444
4445 buf_t
alloc_io_buf(vnode_t vp, int priv)
4447 {
4448 buf_t bp;
4449 mount_t mp = NULL;
4450 int alloc_for_virtualdev = FALSE;
4451
4452 lck_mtx_lock_spin(&iobuffer_mtxp);
4453
	/*
	 * We subject iobuf requests for diskimages to additional restrictions.
	 *
	 * a) A single diskimage mount cannot use up more than
	 * MNT_VIRTUALDEV_MAX_IOBUFS. However, VM-privileged (pageout) requests
	 * are not subject to this restriction.
	 * b) iobuf headers in use by all diskimage mount points combined
	 * cannot exceed VIRTUALDEV_MAX_IOBUFS.
	 */
4463 if (vp && ((mp = vp->v_mount)) && mp != dead_mountp &&
4464 mp->mnt_kern_flag & MNTK_VIRTUALDEV) {
4465 alloc_for_virtualdev = TRUE;
4466 while ((!priv && mp->mnt_iobufinuse > MNT_VIRTUALDEV_MAX_IOBUFS) ||
4467 bufstats.bufs_iobufinuse_vdev > VIRTUALDEV_MAX_IOBUFS) {
4468 bufstats.bufs_iobufsleeps++;
4469
4470 need_iobuffer = 1;
4471 (void)msleep(&need_iobuffer, &iobuffer_mtxp,
4472 PSPIN | (PRIBIO + 1), (const char *)"alloc_io_buf (1)",
4473 NULL);
4474 }
4475 }
4476
4477 while ((((uint32_t)(niobuf_headers - NRESERVEDIOBUFS) < bufstats.bufs_iobufinuse) && !priv) ||
4478 (bp = iobufqueue.tqh_first) == NULL) {
4479 bufstats.bufs_iobufsleeps++;
4480
4481 need_iobuffer = 1;
4482 (void)msleep(&need_iobuffer, &iobuffer_mtxp, PSPIN | (PRIBIO + 1),
4483 (const char *)"alloc_io_buf (2)", NULL);
4484 }
4485 TAILQ_REMOVE(&iobufqueue, bp, b_freelist);
4486
4487 bufstats.bufs_iobufinuse++;
4488 if (bufstats.bufs_iobufinuse > bufstats.bufs_iobufmax) {
4489 bufstats.bufs_iobufmax = bufstats.bufs_iobufinuse;
4490 }
4491
4492 if (alloc_for_virtualdev) {
4493 mp->mnt_iobufinuse++;
4494 bufstats.bufs_iobufinuse_vdev++;
4495 }
4496
4497 lck_mtx_unlock(&iobuffer_mtxp);
4498
	/*
	 * Initialize various fields.  We don't need to hold the mutex since
	 * the buffer is now private... the vp should have a reference on it
	 * and is not protected by this mutex in any event.
	 */
4505 bp->b_timestamp = 0;
4506 bp->b_proc = NULL;
4507
4508 bp->b_datap = 0;
4509 bp->b_flags = 0;
4510 bp->b_lflags = BL_BUSY | BL_IOBUF;
4511 if (alloc_for_virtualdev) {
4512 bp->b_lflags |= BL_IOBUF_VDEV;
4513 }
4514 bp->b_redundancy_flags = 0;
4515 bp->b_blkno = bp->b_lblkno = 0;
4516 bp->b_lblksize = 0;
4517 #ifdef JOE_DEBUG
4518 bp->b_owner = current_thread();
4519 bp->b_tag = 6;
4520 #endif
4521 bp->b_iodone = NULL;
4522 bp->b_error = 0;
4523 bp->b_resid = 0;
4524 bp->b_bcount = 0;
4525 bp->b_bufsize = 0;
4526 bp->b_upl = NULL;
4527 bp->b_fsprivate = (void *)NULL;
4528 bp->b_vp = vp;
4529 bzero(&bp->b_attr, sizeof(struct bufattr));
4530
4531 if (vp && (vp->v_type == VBLK || vp->v_type == VCHR)) {
4532 bp->b_dev = vp->v_rdev;
4533 } else {
4534 bp->b_dev = NODEV;
4535 }
4536
4537 return bp;
4538 }
4539
4540
4541 void
free_io_buf(buf_t bp)
4543 {
4544 int need_wakeup = 0;
4545 int free_for_virtualdev = FALSE;
4546 mount_t mp = NULL;
4547
4548 /* Was this iobuf for a diskimage ? */
4549 if (bp->b_lflags & BL_IOBUF_VDEV) {
4550 free_for_virtualdev = TRUE;
4551 if (bp->b_vp) {
4552 mp = bp->b_vp->v_mount;
4553 }
4554 }
4555
4556 /*
4557 * put buffer back on the head of the iobufqueue
4558 */
4559 bp->b_vp = NULL;
4560 bp->b_flags = B_INVAL;
4561
4562 /* Zero out the bufattr and its flags before relinquishing this iobuf */
4563 bzero(&bp->b_attr, sizeof(struct bufattr));
4564
4565 lck_mtx_lock_spin(&iobuffer_mtxp);
4566
4567 binsheadfree(bp, &iobufqueue, -1);
4568
4569 if (need_iobuffer) {
		/*
		 * Wake up any processes waiting because they need an I/O buffer.
		 *
		 * Do the wakeup after we drop the mutex... it's possible that the
		 * wakeup will be superfluous if need_iobuffer gets set again and
		 * another thread runs this path, but it's highly unlikely, doesn't
		 * hurt, and it means we don't hold up I/O progress if the wakeup
		 * blocks trying to grab a task-related lock...
		 */
4579 need_iobuffer = 0;
4580 need_wakeup = 1;
4581 }
4582 if (bufstats.bufs_iobufinuse <= 0) {
4583 panic("free_io_buf: bp(%p) - bufstats.bufs_iobufinuse < 0", bp);
4584 }
4585
4586 bufstats.bufs_iobufinuse--;
4587
4588 if (free_for_virtualdev) {
4589 bufstats.bufs_iobufinuse_vdev--;
4590 if (mp && mp != dead_mountp) {
4591 mp->mnt_iobufinuse--;
4592 }
4593 }
4594
4595 lck_mtx_unlock(&iobuffer_mtxp);
4596
4597 if (need_wakeup) {
4598 wakeup(&need_iobuffer);
4599 }
4600 }
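
/*
 * Illustrative usage sketch: alloc_io_buf() and free_io_buf() are used as a
 * pair by the lower-level I/O paths.  A hypothetical caller looks roughly
 * like:
 *
 *	buf_t bp = alloc_io_buf(vp, 0);	// may sleep until a header is free
 *	// ... fill in b_datap, b_bcount, b_blkno, b_flags, etc. ...
 *	VNOP_STRATEGY(bp);
 *	error = buf_biowait(bp);
 *	free_io_buf(bp);		// returns the header to iobufqueue
 */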
4601
4602
4603 void
buf_list_lock(void)
4605 {
4606 lck_mtx_lock_spin(&buf_mtx);
4607 }
4608
4609 void
buf_list_unlock(void)
4611 {
4612 lck_mtx_unlock(&buf_mtx);
4613 }
4614
/*
 * If getnewbuf() calls bcleanbuf() on the same thread,
 * there is a potential for stack overrun and deadlocks.
 * So we always hand off the work to a worker thread for completion.
 */
4620
4621
4622 static void
bcleanbuf_thread_init(void)
4624 {
4625 thread_t thread = THREAD_NULL;
4626
4627 /* create worker thread */
4628 kernel_thread_start((thread_continue_t)bcleanbuf_thread, NULL, &thread);
4629 thread_deallocate(thread);
4630 }
4631
4632 typedef int (*bcleanbufcontinuation)(int);
4633
4634 __attribute__((noreturn))
4635 static void
bcleanbuf_thread(void)
4637 {
4638 struct buf *bp;
4639 int error = 0;
4640 int loopcnt = 0;
4641
4642 for (;;) {
4643 lck_mtx_lock_spin(&buf_mtx);
4644
4645 while ((bp = TAILQ_FIRST(&bufqueues[BQ_LAUNDRY])) == NULL) {
4646 (void)msleep0(&bufqueues[BQ_LAUNDRY], &buf_mtx, PRIBIO | PDROP, "blaundry", 0, (bcleanbufcontinuation)bcleanbuf_thread);
4647 }
4648
4649 /*
4650 * Remove from the queue
4651 */
4652 bremfree_locked(bp);
4653
4654 /*
4655 * Buffer is no longer on any free list
4656 */
4657 SET(bp->b_lflags, BL_BUSY);
4658 buf_busycount++;
4659
4660 #ifdef JOE_DEBUG
4661 bp->b_owner = current_thread();
4662 bp->b_tag = 10;
4663 #endif
4664
4665 lck_mtx_unlock(&buf_mtx);
4666 /*
4667 * do the IO
4668 */
4669 error = bawrite_internal(bp, 0);
4670
4671 if (error) {
4672 bp->b_whichq = BQ_LAUNDRY;
4673 bp->b_timestamp = buf_timestamp();
4674
4675 lck_mtx_lock_spin(&buf_mtx);
4676
4677 binstailfree(bp, &bufqueues[BQ_LAUNDRY], BQ_LAUNDRY);
4678 blaundrycnt++;
4679
4680 /* we never leave a busy page on the laundry queue */
4681 CLR(bp->b_lflags, BL_BUSY);
4682 buf_busycount--;
4683 #ifdef JOE_DEBUG
4684 bp->b_owner = current_thread();
4685 bp->b_tag = 11;
4686 #endif
4687
4688 lck_mtx_unlock(&buf_mtx);
4689
4690 if (loopcnt > MAXLAUNDRY) {
4691 /*
4692 * bawrite_internal() can return errors if we're throttled. If we've
4693 * done several I/Os and failed, give the system some time to unthrottle
4694 * the vnode
4695 */
4696 (void)tsleep((void *)&bufqueues[BQ_LAUNDRY], PRIBIO, "blaundry", 1);
4697 loopcnt = 0;
4698 } else {
4699 /* give other threads a chance to run */
4700 (void)thread_block(THREAD_CONTINUE_NULL);
4701 loopcnt++;
4702 }
4703 }
4704 }
4705 }
4706
4707
4708 static int
brecover_data(buf_t bp)
4710 {
4711 int upl_offset;
4712 upl_t upl;
4713 upl_page_info_t *pl;
4714 kern_return_t kret;
4715 vnode_t vp = bp->b_vp;
4716 int upl_flags;
4717
4718
4719 if (!UBCINFOEXISTS(vp) || bp->b_bufsize == 0) {
4720 goto dump_buffer;
4721 }
4722
4723 upl_flags = UPL_PRECIOUS;
4724 if (!(buf_flags(bp) & B_READ)) {
4725 /*
4726 * "write" operation: let the UPL subsystem know
4727 * that we intend to modify the buffer cache pages we're
4728 * gathering.
4729 */
4730 upl_flags |= UPL_WILL_MODIFY;
4731 }
4732
4733 kret = ubc_create_upl_kernel(vp,
4734 ubc_blktooff(vp, bp->b_lblkno),
4735 bp->b_bufsize,
4736 &upl,
4737 &pl,
4738 upl_flags,
4739 VM_KERN_MEMORY_FILE);
4740 if (kret != KERN_SUCCESS) {
4741 panic("Failed to create UPL");
4742 }
4743
4744 for (upl_offset = 0; (uint32_t)upl_offset < bp->b_bufsize; upl_offset += PAGE_SIZE) {
4745 if (!upl_valid_page(pl, upl_offset / PAGE_SIZE) || !upl_dirty_page(pl, upl_offset / PAGE_SIZE)) {
4746 ubc_upl_abort(upl, 0);
4747 goto dump_buffer;
4748 }
4749 }
4750 bp->b_upl = upl;
4751
4752 kret = ubc_upl_map(upl, (vm_offset_t *)&(bp->b_datap));
4753
4754 if (kret != KERN_SUCCESS) {
4755 panic("getblk: ubc_upl_map() failed with (%d)", kret);
4756 }
4757 return 1;
4758
4759 dump_buffer:
4760 bp->b_bufsize = 0;
4761 SET(bp->b_flags, B_INVAL);
4762 buf_brelse(bp);
4763
4764 return 0;
4765 }
4766
4767 int
fs_buffer_cache_gc_register(void (* callout)(int, void *), void *context)
4769 {
4770 lck_mtx_lock(&buf_gc_callout);
4771 for (int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) {
4772 if (fs_callouts[i].callout == NULL) {
4773 fs_callouts[i].callout = callout;
4774 fs_callouts[i].context = context;
4775 lck_mtx_unlock(&buf_gc_callout);
4776 return 0;
4777 }
4778 }
4779
4780 lck_mtx_unlock(&buf_gc_callout);
4781 return ENOMEM;
4782 }
4783
4784 int
fs_buffer_cache_gc_unregister(void (* callout)(int, void *), void *context)
4786 {
4787 lck_mtx_lock(&buf_gc_callout);
4788 for (int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) {
4789 if (fs_callouts[i].callout == callout &&
4790 fs_callouts[i].context == context) {
4791 fs_callouts[i].callout = NULL;
4792 fs_callouts[i].context = NULL;
4793 }
4794 }
4795 lck_mtx_unlock(&buf_gc_callout);
4796 return 0;
4797 }
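
/*
 * Illustrative registration sketch: a filesystem that wants to shed its own
 * caches when the buffer cache is garbage collected can register a callout;
 * the names below are hypothetical.
 *
 *	static void
 *	examplefs_gc_callout(int all, void *context)
 *	{
 *		// drop private caches; "all" is set for the aggressive pass
 *	}
 *
 *	fs_buffer_cache_gc_register(examplefs_gc_callout, examplefs_state);
 *	...
 *	fs_buffer_cache_gc_unregister(examplefs_gc_callout, examplefs_state);
 */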
4798
4799 static void
fs_buffer_cache_gc_dispatch_callouts(int all)
4801 {
4802 lck_mtx_lock(&buf_gc_callout);
4803 for (int i = 0; i < FS_BUFFER_CACHE_GC_CALLOUTS_MAX_SIZE; i++) {
4804 if (fs_callouts[i].callout != NULL) {
4805 fs_callouts[i].callout(all, fs_callouts[i].context);
4806 }
4807 }
4808 lck_mtx_unlock(&buf_gc_callout);
4809 }
4810
4811 static boolean_t
buffer_cache_gc(int all)
4813 {
4814 buf_t bp;
4815 boolean_t did_large_zfree = FALSE;
4816 boolean_t need_wakeup = FALSE;
4817 int now = buf_timestamp();
4818 uint32_t found = 0;
4819 struct bqueues privq;
4820 int thresh_hold = BUF_STALE_THRESHHOLD;
4821
4822 if (all) {
4823 thresh_hold = 0;
4824 }
	/*
	 * We only care about metadata (incore storage comes from zalloc()).
	 * Unless "all" is set (used to evict metadata buffers in preparation
	 * for deep sleep), we only evict up to BUF_MAX_GC_BATCH_SIZE buffers
	 * that have not been accessed in the last BUF_STALE_THRESHHOLD seconds.
	 * BUF_MAX_GC_BATCH_SIZE controls both the hold time of the global lock
	 * "buf_mtx" and the length of time we spend compute bound in the GC
	 * thread which calls this function.
	 */
4834 lck_mtx_lock(&buf_mtx);
4835
4836 do {
4837 found = 0;
4838 TAILQ_INIT(&privq);
4839 need_wakeup = FALSE;
4840
4841 while (((bp = TAILQ_FIRST(&bufqueues[BQ_META]))) &&
4842 (now > bp->b_timestamp) &&
4843 (now - bp->b_timestamp > thresh_hold) &&
4844 (found < BUF_MAX_GC_BATCH_SIZE)) {
4845 /* Remove from free list */
4846 bremfree_locked(bp);
4847 found++;
4848
4849 #ifdef JOE_DEBUG
4850 bp->b_owner = current_thread();
4851 bp->b_tag = 12;
4852 #endif
4853
4854 /* If dirty, move to laundry queue and remember to do wakeup */
4855 if (ISSET(bp->b_flags, B_DELWRI)) {
4856 SET(bp->b_lflags, BL_WANTDEALLOC);
4857
4858 bmovelaundry(bp);
4859 need_wakeup = TRUE;
4860
4861 continue;
4862 }
4863
4864 /*
4865 * Mark busy and put on private list. We could technically get
4866 * away without setting BL_BUSY here.
4867 */
4868 SET(bp->b_lflags, BL_BUSY);
4869 buf_busycount++;
4870
4871 /*
4872 * Remove from hash and dissociate from vp.
4873 */
4874 bremhash(bp);
4875 if (bp->b_vp) {
4876 brelvp_locked(bp);
4877 }
4878
4879 TAILQ_INSERT_TAIL(&privq, bp, b_freelist);
4880 }
4881
4882 if (found == 0) {
4883 break;
4884 }
4885
4886 /* Drop lock for batch processing */
4887 lck_mtx_unlock(&buf_mtx);
4888
4889 /* Wakeup and yield for laundry if need be */
4890 if (need_wakeup) {
4891 wakeup(&bufqueues[BQ_LAUNDRY]);
4892 (void)thread_block(THREAD_CONTINUE_NULL);
4893 }
4894
4895 /* Clean up every buffer on private list */
4896 TAILQ_FOREACH(bp, &privq, b_freelist) {
4897 /* Take note if we've definitely freed at least a page to a zone */
4898 if ((ISSET(bp->b_flags, B_ZALLOC)) && (buf_size(bp) >= PAGE_SIZE)) {
4899 did_large_zfree = TRUE;
4900 }
4901
4902 trace(TR_BRELSE, pack(bp->b_vp, bp->b_bufsize), bp->b_lblkno);
4903
4904 /* Free Storage */
4905 buf_free_meta_store(bp);
4906
4907 /* Release credentials */
4908 buf_release_credentials(bp);
4909
4910 /* Prepare for moving to empty queue */
4911 CLR(bp->b_flags, (B_META | B_ZALLOC | B_DELWRI | B_LOCKED
4912 | B_AGE | B_ASYNC | B_NOCACHE | B_FUA));
4913 bp->b_whichq = BQ_EMPTY;
4914 BLISTNONE(bp);
4915 }
4916 lck_mtx_lock(&buf_mtx);
4917
4918 /* Back under lock, move them all to invalid hash and clear busy */
4919 TAILQ_FOREACH(bp, &privq, b_freelist) {
4920 binshash(bp, &invalhash);
4921 CLR(bp->b_lflags, BL_BUSY);
4922 buf_busycount--;
4923
4924 #ifdef JOE_DEBUG
4925 if (bp->b_owner != current_thread()) {
4926 panic("Buffer stolen from buffer_cache_gc()");
4927 }
4928 bp->b_owner = current_thread();
4929 bp->b_tag = 13;
4930 #endif
4931 }
4932
4933 /* And do a big bulk move to the empty queue */
4934 TAILQ_CONCAT(&bufqueues[BQ_EMPTY], &privq, b_freelist);
4935 } while (all && (found == BUF_MAX_GC_BATCH_SIZE));
4936
4937 lck_mtx_unlock(&buf_mtx);
4938
4939 fs_buffer_cache_gc_dispatch_callouts(all);
4940
4941 return did_large_zfree;
4942 }
4943
4944
4945 /*
4946 * disabled for now
4947 */
4948
4949 #if FLUSH_QUEUES
4950
4951 #define NFLUSH 32
4952
4953 static int
bp_cmp(void *a, void *b)
4955 {
4956 buf_t *bp_a = *(buf_t **)a,
4957 *bp_b = *(buf_t **)b;
4958 daddr64_t res;
4959
4960 // don't have to worry about negative block
4961 // numbers so this is ok to do.
4962 //
4963 res = (bp_a->b_blkno - bp_b->b_blkno);
4964
4965 return (int)res;
4966 }
4967
4968
4969 int
bflushq(int whichq, mount_t mp)
4971 {
4972 buf_t bp, next;
4973 int i, buf_count;
4974 int total_writes = 0;
4975 static buf_t flush_table[NFLUSH];
4976
4977 if (whichq < 0 || whichq >= BQUEUES) {
4978 return 0;
4979 }
4980
4981 restart:
4982 lck_mtx_lock(&buf_mtx);
4983
4984 bp = TAILQ_FIRST(&bufqueues[whichq]);
4985
4986 for (buf_count = 0; bp; bp = next) {
4987 next = bp->b_freelist.tqe_next;
4988
4989 if (bp->b_vp == NULL || bp->b_vp->v_mount != mp) {
4990 continue;
4991 }
4992
4993 if (ISSET(bp->b_flags, B_DELWRI) && !ISSET(bp->b_lflags, BL_BUSY)) {
4994 bremfree_locked(bp);
4995 #ifdef JOE_DEBUG
4996 bp->b_owner = current_thread();
4997 bp->b_tag = 7;
4998 #endif
4999 SET(bp->b_lflags, BL_BUSY);
5000 buf_busycount++;
5001
5002 flush_table[buf_count] = bp;
5003 buf_count++;
5004 total_writes++;
5005
5006 if (buf_count >= NFLUSH) {
5007 lck_mtx_unlock(&buf_mtx);
5008
5009 qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp);
5010
5011 for (i = 0; i < buf_count; i++) {
5012 buf_bawrite(flush_table[i]);
5013 }
5014 goto restart;
5015 }
5016 }
5017 }
5018 lck_mtx_unlock(&buf_mtx);
5019
5020 if (buf_count > 0) {
5021 qsort(flush_table, buf_count, sizeof(struct buf *), bp_cmp);
5022
5023 for (i = 0; i < buf_count; i++) {
5024 buf_bawrite(flush_table[i]);
5025 }
5026 }
5027
5028 return total_writes;
5029 }
5030 #endif
5031