1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
24 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
26 * Copyright (c) 2016, Nexenta Systems, Inc. All rights reserved.
27 * Copyright (c) 2015 by Chunwei Chen. All rights reserved.
28 * Copyright (c) 2019 Datto Inc.
29 * Copyright (c) 2019, Klara Inc.
30 * Copyright (c) 2019, Allan Jude
31 */
32
33 #include <sys/dmu.h>
34 #include <sys/dmu_impl.h>
35 #include <sys/dmu_tx.h>
36 #include <sys/dbuf.h>
37 #include <sys/dnode.h>
38 #include <sys/zfs_context.h>
39 #include <sys/dmu_objset.h>
40 #include <sys/dmu_traverse.h>
41 #include <sys/dsl_dataset.h>
42 #include <sys/dsl_dir.h>
43 #include <sys/dsl_pool.h>
44 #include <sys/dsl_synctask.h>
45 #include <sys/dsl_prop.h>
46 #include <sys/dmu_zfetch.h>
47 #include <sys/zfs_ioctl.h>
48 #include <sys/zap.h>
49 #include <sys/zio_checksum.h>
50 #include <sys/zio_compress.h>
51 #include <sys/sa.h>
52 #include <sys/zfeature.h>
53 #include <sys/abd.h>
54 #include <sys/trace_zfs.h>
55 #include <sys/zfs_racct.h>
56 #include <sys/zfs_rlock.h>
57 #ifdef _KERNEL
58 #include <sys/vmsystm.h>
59 #include <sys/zfs_znode.h>
60 #endif
61
62 /*
63 * Enable/disable nopwrite feature.
64 */
65 int zfs_nopwrite_enabled = 1;
66
67 /*
68 * Tunable to control percentage of dirtied L1 blocks from frees allowed into
69 * one TXG. After this threshold is crossed, additional dirty blocks from frees
70 * will wait until the next TXG.
71 * A value of zero will disable this throttle.
72 */
73 unsigned long zfs_per_txg_dirty_frees_percent = 5;
74
75 /*
76 * Enable/disable forcing txg sync when dirty in dmu_offset_next.
77 */
78 int zfs_dmu_offset_next_sync = 0;
79
80 /*
81 * Limit the amount we can prefetch with one call to this amount. This
82 * helps to limit the amount of memory that can be used by prefetching.
83 * Larger objects should be prefetched a bit at a time.
84 */
85 int dmu_prefetch_max = 8 * SPA_MAXBLOCKSIZE;
86
87 const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
88 {DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "unallocated" },
89 {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "object directory" },
90 {DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "object array" },
91 {DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "packed nvlist" },
92 {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "packed nvlist size" },
93 {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj" },
94 {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj header" },
95 {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map header" },
96 {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA space map" },
97 {DMU_BSWAP_UINT64, TRUE, FALSE, TRUE, "ZIL intent log" },
98 {DMU_BSWAP_DNODE, TRUE, FALSE, TRUE, "DMU dnode" },
99 {DMU_BSWAP_OBJSET, TRUE, TRUE, FALSE, "DMU objset" },
100 {DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL directory" },
101 {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL directory child map"},
102 {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset snap map" },
103 {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL props" },
104 {DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL dataset" },
105 {DMU_BSWAP_ZNODE, TRUE, FALSE, FALSE, "ZFS znode" },
106 {DMU_BSWAP_OLDACL, TRUE, FALSE, TRUE, "ZFS V0 ACL" },
107 {DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "ZFS plain file" },
108 {DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS directory" },
109 {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "ZFS master node" },
110 {DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS delete queue" },
111 {DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "zvol object" },
112 {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "zvol prop" },
113 {DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "other uint8[]" },
114 {DMU_BSWAP_UINT64, FALSE, FALSE, TRUE, "other uint64[]" },
115 {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "other ZAP" },
116 {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "persistent error log" },
117 {DMU_BSWAP_UINT8, TRUE, FALSE, FALSE, "SPA history" },
118 {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "SPA history offsets" },
119 {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "Pool properties" },
120 {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL permissions" },
121 {DMU_BSWAP_ACL, TRUE, FALSE, TRUE, "ZFS ACL" },
122 {DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "ZFS SYSACL" },
123 {DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "FUID table" },
124 {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "FUID table size" },
125 {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dataset next clones"},
126 {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan work queue" },
127 {DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project used" },
128 {DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "ZFS user/group/project quota"},
129 {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "snapshot refcount tags"},
130 {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT ZAP algorithm" },
131 {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "DDT statistics" },
132 {DMU_BSWAP_UINT8, TRUE, FALSE, TRUE, "System attributes" },
133 {DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA master node" },
134 {DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr registration" },
135 {DMU_BSWAP_ZAP, TRUE, FALSE, TRUE, "SA attr layouts" },
136 {DMU_BSWAP_ZAP, TRUE, FALSE, FALSE, "scan translations" },
137 {DMU_BSWAP_UINT8, FALSE, FALSE, TRUE, "deduplicated block" },
138 {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL deadlist map" },
139 {DMU_BSWAP_UINT64, TRUE, TRUE, FALSE, "DSL deadlist map hdr" },
140 {DMU_BSWAP_ZAP, TRUE, TRUE, FALSE, "DSL dir clones" },
141 {DMU_BSWAP_UINT64, TRUE, FALSE, FALSE, "bpobj subobj" }
142 };
143
144 const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
145 { byteswap_uint8_array, "uint8" },
146 { byteswap_uint16_array, "uint16" },
147 { byteswap_uint32_array, "uint32" },
148 { byteswap_uint64_array, "uint64" },
149 { zap_byteswap, "zap" },
150 { dnode_buf_byteswap, "dnode" },
151 { dmu_objset_byteswap, "objset" },
152 { zfs_znode_byteswap, "znode" },
153 { zfs_oldacl_byteswap, "oldacl" },
154 { zfs_acl_byteswap, "acl" }
155 };
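/*
 * Illustrative sketch (not part of the original source): how the two tables
 * above are consulted to byteswap a buffer of a given object type. The
 * field names (ot_byteswap, ob_func) and the DMU_OT_BYTESWAP() macro are
 * from dmu.h; this helper is hypothetical.
 */
static void
example_byteswap_object_data(dmu_object_type_t type, void *buf, size_t size)
{
	ASSERT(DMU_OT_IS_VALID(type));

	/* Map the object type to its byteswap class, then to its function. */
	dmu_object_byteswap_t bswap = DMU_OT_BYTESWAP(type);
	ASSERT3U(bswap, <, DMU_BSWAP_NUMFUNCS);
	dmu_ot_byteswap[bswap].ob_func(buf, size);
}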
156
157 static int
158 dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
159 void *tag, dmu_buf_t **dbp)
160 {
161 uint64_t blkid;
162 dmu_buf_impl_t *db;
163
164 rw_enter(&dn->dn_struct_rwlock, RW_READER);
165 blkid = dbuf_whichblock(dn, 0, offset);
166 db = dbuf_hold(dn, blkid, tag);
167 rw_exit(&dn->dn_struct_rwlock);
168
169 if (db == NULL) {
170 *dbp = NULL;
171 return (SET_ERROR(EIO));
172 }
173
174 *dbp = &db->db;
175 return (0);
176 }
177 int
178 dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
179 void *tag, dmu_buf_t **dbp)
180 {
181 dnode_t *dn;
182 uint64_t blkid;
183 dmu_buf_impl_t *db;
184 int err;
185
186 err = dnode_hold(os, object, FTAG, &dn);
187 if (err)
188 return (err);
189 rw_enter(&dn->dn_struct_rwlock, RW_READER);
190 blkid = dbuf_whichblock(dn, 0, offset);
191 db = dbuf_hold(dn, blkid, tag);
192 rw_exit(&dn->dn_struct_rwlock);
193 dnode_rele(dn, FTAG);
194
195 if (db == NULL) {
196 *dbp = NULL;
197 return (SET_ERROR(EIO));
198 }
199
200 *dbp = &db->db;
201 return (err);
202 }
203
204 int
205 dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
206 void *tag, dmu_buf_t **dbp, int flags)
207 {
208 int err;
209 int db_flags = DB_RF_CANFAIL;
210
211 if (flags & DMU_READ_NO_PREFETCH)
212 db_flags |= DB_RF_NOPREFETCH;
213 if (flags & DMU_READ_NO_DECRYPT)
214 db_flags |= DB_RF_NO_DECRYPT;
215
216 err = dmu_buf_hold_noread_by_dnode(dn, offset, tag, dbp);
217 if (err == 0) {
218 dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
219 err = dbuf_read(db, NULL, db_flags);
220 if (err != 0) {
221 dbuf_rele(db, tag);
222 *dbp = NULL;
223 }
224 }
225
226 return (err);
227 }
228
229 int
230 dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
231 void *tag, dmu_buf_t **dbp, int flags)
232 {
233 int err;
234 int db_flags = DB_RF_CANFAIL;
235
236 if (flags & DMU_READ_NO_PREFETCH)
237 db_flags |= DB_RF_NOPREFETCH;
238 if (flags & DMU_READ_NO_DECRYPT)
239 db_flags |= DB_RF_NO_DECRYPT;
240
241 err = dmu_buf_hold_noread(os, object, offset, tag, dbp);
242 if (err == 0) {
243 dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
244 err = dbuf_read(db, NULL, db_flags);
245 if (err != 0) {
246 dbuf_rele(db, tag);
247 *dbp = NULL;
248 }
249 }
250
251 return (err);
252 }
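/*
 * Illustrative sketch (not part of the original source): the basic
 * hold/use/release pattern for dmu_buf_hold(). The object number and offset
 * are assumed valid for the given objset; the helper name is hypothetical.
 */
static int
example_dmu_buf_hold_usage(objset_t *os, uint64_t object, uint64_t offset)
{
	dmu_buf_t *db;
	int err;

	err = dmu_buf_hold(os, object, offset, FTAG, &db, DMU_READ_PREFETCH);
	if (err != 0)
		return (err);

	/* db->db_data is now valid for db->db_size bytes. */

	/* Drop the hold with the same tag used to acquire it. */
	dmu_buf_rele(db, FTAG);
	return (0);
}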
253
254 int
255 dmu_bonus_max(void)
256 {
257 return (DN_OLD_MAX_BONUSLEN);
258 }
259
260 int
261 dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
262 {
263 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
264 dnode_t *dn;
265 int error;
266
267 DB_DNODE_ENTER(db);
268 dn = DB_DNODE(db);
269
270 if (dn->dn_bonus != db) {
271 error = SET_ERROR(EINVAL);
272 } else if (newsize < 0 || newsize > db_fake->db_size) {
273 error = SET_ERROR(EINVAL);
274 } else {
275 dnode_setbonuslen(dn, newsize, tx);
276 error = 0;
277 }
278
279 DB_DNODE_EXIT(db);
280 return (error);
281 }
282
283 int
284 dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
285 {
286 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
287 dnode_t *dn;
288 int error;
289
290 DB_DNODE_ENTER(db);
291 dn = DB_DNODE(db);
292
293 if (!DMU_OT_IS_VALID(type)) {
294 error = SET_ERROR(EINVAL);
295 } else if (dn->dn_bonus != db) {
296 error = SET_ERROR(EINVAL);
297 } else {
298 dnode_setbonus_type(dn, type, tx);
299 error = 0;
300 }
301
302 DB_DNODE_EXIT(db);
303 return (error);
304 }
305
306 dmu_object_type_t
307 dmu_get_bonustype(dmu_buf_t *db_fake)
308 {
309 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
310 dnode_t *dn;
311 dmu_object_type_t type;
312
313 DB_DNODE_ENTER(db);
314 dn = DB_DNODE(db);
315 type = dn->dn_bonustype;
316 DB_DNODE_EXIT(db);
317
318 return (type);
319 }
320
321 int
322 dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
323 {
324 dnode_t *dn;
325 int error;
326
327 error = dnode_hold(os, object, FTAG, &dn);
328 dbuf_rm_spill(dn, tx);
329 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
330 dnode_rm_spill(dn, tx);
331 rw_exit(&dn->dn_struct_rwlock);
332 dnode_rele(dn, FTAG);
333 return (error);
334 }
335
336 /*
337 * Lookup and hold the bonus buffer for the provided dnode. If the dnode
338 * has not yet been allocated a bonus dbuf, a new one will be allocated.
339 * Returns ENOENT, EIO, or 0.
340 */
341 int dmu_bonus_hold_by_dnode(dnode_t *dn, void *tag, dmu_buf_t **dbp,
342 uint32_t flags)
343 {
344 dmu_buf_impl_t *db;
345 int error;
346 uint32_t db_flags = DB_RF_MUST_SUCCEED;
347
348 if (flags & DMU_READ_NO_PREFETCH)
349 db_flags |= DB_RF_NOPREFETCH;
350 if (flags & DMU_READ_NO_DECRYPT)
351 db_flags |= DB_RF_NO_DECRYPT;
352
353 rw_enter(&dn->dn_struct_rwlock, RW_READER);
354 if (dn->dn_bonus == NULL) {
355 rw_exit(&dn->dn_struct_rwlock);
356 rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
357 if (dn->dn_bonus == NULL)
358 dbuf_create_bonus(dn);
359 }
360 db = dn->dn_bonus;
361
362 /* as long as the bonus buf is held, the dnode will be held */
363 if (zfs_refcount_add(&db->db_holds, tag) == 1) {
364 VERIFY(dnode_add_ref(dn, db));
365 atomic_inc_32(&dn->dn_dbufs_count);
366 }
367
368 /*
369 * Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
370 * hold and incrementing the dbuf count to ensure that dnode_move() sees
371 * a dnode hold for every dbuf.
372 */
373 rw_exit(&dn->dn_struct_rwlock);
374
375 error = dbuf_read(db, NULL, db_flags);
376 if (error) {
377 dnode_evict_bonus(dn);
378 dbuf_rele(db, tag);
379 *dbp = NULL;
380 return (error);
381 }
382
383 *dbp = &db->db;
384 return (0);
385 }
386
387 int
388 dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **dbp)
389 {
390 dnode_t *dn;
391 int error;
392
393 error = dnode_hold(os, object, FTAG, &dn);
394 if (error)
395 return (error);
396
397 error = dmu_bonus_hold_by_dnode(dn, tag, dbp, DMU_READ_NO_PREFETCH);
398 dnode_rele(dn, FTAG);
399
400 return (error);
401 }
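/*
 * Illustrative sketch (not part of the original source): copying out an
 * object's bonus data, e.g. to inspect per-object metadata. The helper name
 * is hypothetical.
 */
static int
example_read_bonus(objset_t *os, uint64_t object, void *buf, uint64_t buflen)
{
	dmu_buf_t *db;
	int err;

	err = dmu_bonus_hold(os, object, FTAG, &db);
	if (err != 0)
		return (err);

	/* Copy no more than the bonus buffer actually holds. */
	bcopy(db->db_data, buf, MIN(buflen, db->db_size));

	dmu_buf_rele(db, FTAG);
	return (0);
}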
402
403 /*
404 * returns ENOENT, EIO, or 0.
405 *
406 * This interface will allocate a blank spill dbuf when a spill blk
407 * doesn't already exist on the dnode.
408 *
409 * if you only want to find an already existing spill db, then
410 * dmu_spill_hold_existing() should be used.
411 */
412 int
413 dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp)
414 {
415 dmu_buf_impl_t *db = NULL;
416 int err;
417
418 if ((flags & DB_RF_HAVESTRUCT) == 0)
419 rw_enter(&dn->dn_struct_rwlock, RW_READER);
420
421 db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);
422
423 if ((flags & DB_RF_HAVESTRUCT) == 0)
424 rw_exit(&dn->dn_struct_rwlock);
425
426 if (db == NULL) {
427 *dbp = NULL;
428 return (SET_ERROR(EIO));
429 }
430 err = dbuf_read(db, NULL, flags);
431 if (err == 0)
432 *dbp = &db->db;
433 else {
434 dbuf_rele(db, tag);
435 *dbp = NULL;
436 }
437 return (err);
438 }
439
440 int
441 dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
442 {
443 dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
444 dnode_t *dn;
445 int err;
446
447 DB_DNODE_ENTER(db);
448 dn = DB_DNODE(db);
449
450 if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
451 err = SET_ERROR(EINVAL);
452 } else {
453 rw_enter(&dn->dn_struct_rwlock, RW_READER);
454
455 if (!dn->dn_have_spill) {
456 err = SET_ERROR(ENOENT);
457 } else {
458 err = dmu_spill_hold_by_dnode(dn,
459 DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
460 }
461
462 rw_exit(&dn->dn_struct_rwlock);
463 }
464
465 DB_DNODE_EXIT(db);
466 return (err);
467 }
468
469 int
470 dmu_spill_hold_by_bonus(dmu_buf_t *bonus, uint32_t flags, void *tag,
471 dmu_buf_t **dbp)
472 {
473 dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
474 dnode_t *dn;
475 int err;
476 uint32_t db_flags = DB_RF_CANFAIL;
477
478 if (flags & DMU_READ_NO_DECRYPT)
479 db_flags |= DB_RF_NO_DECRYPT;
480
481 DB_DNODE_ENTER(db);
482 dn = DB_DNODE(db);
483 err = dmu_spill_hold_by_dnode(dn, db_flags, tag, dbp);
484 DB_DNODE_EXIT(db);
485
486 return (err);
487 }
488
489 /*
490 * Note: longer-term, we should modify all of the dmu_buf_*() interfaces
491 * to take a held dnode rather than <os, object> -- the lookup is wasteful,
492 * and can induce severe lock contention when writing to several files
493 * whose dnodes are in the same block.
494 */
495 int
496 dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
497 boolean_t read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
498 {
499 dmu_buf_t **dbp;
500 zstream_t *zs = NULL;
501 uint64_t blkid, nblks, i;
502 uint32_t dbuf_flags;
503 int err;
504 zio_t *zio = NULL;
505 boolean_t missed = B_FALSE;
506
507 ASSERT(length <= DMU_MAX_ACCESS);
508
509 /*
510 * Note: We directly notify the prefetch code of this read, so that
511 * we can tell it about the multi-block read. dbuf_read() only knows
512 * about the one block it is accessing.
513 */
514 dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT |
515 DB_RF_NOPREFETCH;
516
517 rw_enter(&dn->dn_struct_rwlock, RW_READER);
518 if (dn->dn_datablkshift) {
519 int blkshift = dn->dn_datablkshift;
520 nblks = (P2ROUNDUP(offset + length, 1ULL << blkshift) -
521 P2ALIGN(offset, 1ULL << blkshift)) >> blkshift;
522 } else {
523 if (offset + length > dn->dn_datablksz) {
524 zfs_panic_recover("zfs: accessing past end of object "
525 "%llx/%llx (size=%u access=%llu+%llu)",
526 (longlong_t)dn->dn_objset->
527 os_dsl_dataset->ds_object,
528 (longlong_t)dn->dn_object, dn->dn_datablksz,
529 (longlong_t)offset, (longlong_t)length);
530 rw_exit(&dn->dn_struct_rwlock);
531 return (SET_ERROR(EIO));
532 }
533 nblks = 1;
534 }
535 dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);
536
537 if (read)
538 zio = zio_root(dn->dn_objset->os_spa, NULL, NULL,
539 ZIO_FLAG_CANFAIL);
540 blkid = dbuf_whichblock(dn, 0, offset);
541 if ((flags & DMU_READ_NO_PREFETCH) == 0 &&
542 DNODE_META_IS_CACHEABLE(dn) && length <= zfetch_array_rd_sz) {
543 /*
544 * Prepare the zfetch before initiating the demand reads, so
545 * that if multiple threads block on same indirect block, we
546 * base predictions on the original less racy request order.
547 */
548 zs = dmu_zfetch_prepare(&dn->dn_zfetch, blkid, nblks,
549 read && DNODE_IS_CACHEABLE(dn), B_TRUE);
550 }
551 for (i = 0; i < nblks; i++) {
552 dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag);
553 if (db == NULL) {
554 if (zs)
555 dmu_zfetch_run(zs, missed, B_TRUE);
556 rw_exit(&dn->dn_struct_rwlock);
557 dmu_buf_rele_array(dbp, nblks, tag);
558 if (read)
559 zio_nowait(zio);
560 return (SET_ERROR(EIO));
561 }
562
563 /*
564 * Initiate async demand data read.
565 * We check the db_state after calling dbuf_read() because
566 * (1) dbuf_read() may change the state to CACHED due to a
567 * hit in the ARC, and (2) on a cache miss, a child will
568 * have been added to "zio" but not yet completed, so the
569 * state will not yet be CACHED.
570 */
571 if (read) {
572 (void) dbuf_read(db, zio, dbuf_flags);
573 if (db->db_state != DB_CACHED)
574 missed = B_TRUE;
575 }
576 dbp[i] = &db->db;
577 }
578
579 if (!read)
580 zfs_racct_write(length, nblks);
581
582 if (zs)
583 dmu_zfetch_run(zs, missed, B_TRUE);
584 rw_exit(&dn->dn_struct_rwlock);
585
586 if (read) {
587 /* wait for async read i/o */
588 err = zio_wait(zio);
589 if (err) {
590 dmu_buf_rele_array(dbp, nblks, tag);
591 return (err);
592 }
593
594 /* wait for other io to complete */
595 for (i = 0; i < nblks; i++) {
596 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
597 mutex_enter(&db->db_mtx);
598 while (db->db_state == DB_READ ||
599 db->db_state == DB_FILL)
600 cv_wait(&db->db_changed, &db->db_mtx);
601 if (db->db_state == DB_UNCACHED)
602 err = SET_ERROR(EIO);
603 mutex_exit(&db->db_mtx);
604 if (err) {
605 dmu_buf_rele_array(dbp, nblks, tag);
606 return (err);
607 }
608 }
609 }
610
611 *numbufsp = nblks;
612 *dbpp = dbp;
613 return (0);
614 }
615
616 static int
617 dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
618 uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
619 {
620 dnode_t *dn;
621 int err;
622
623 err = dnode_hold(os, object, FTAG, &dn);
624 if (err)
625 return (err);
626
627 err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
628 numbufsp, dbpp, DMU_READ_PREFETCH);
629
630 dnode_rele(dn, FTAG);
631
632 return (err);
633 }
634
635 int
636 dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
637 uint64_t length, boolean_t read, void *tag, int *numbufsp,
638 dmu_buf_t ***dbpp)
639 {
640 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
641 dnode_t *dn;
642 int err;
643
644 DB_DNODE_ENTER(db);
645 dn = DB_DNODE(db);
646 err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
647 numbufsp, dbpp, DMU_READ_PREFETCH);
648 DB_DNODE_EXIT(db);
649
650 return (err);
651 }
652
653 void
654 dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
655 {
656 int i;
657 dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;
658
659 if (numbufs == 0)
660 return;
661
662 for (i = 0; i < numbufs; i++) {
663 if (dbp[i])
664 dbuf_rele(dbp[i], tag);
665 }
666
667 kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
668 }
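/*
 * Illustrative sketch (not part of the original source): per the comment
 * above dmu_buf_hold_array_by_dnode(), holding the dnode once and passing it
 * directly avoids the per-call <os, object> lookup that dmu_buf_hold_array()
 * performs. The helper name is hypothetical.
 */
static int
example_hold_range_by_dnode(objset_t *os, uint64_t object, uint64_t offset,
    uint64_t length)
{
	dnode_t *dn;
	dmu_buf_t **dbp;
	int numbufs, err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err != 0)
		return (err);

	/* One dnode lookup, then as many ranged holds as needed. */
	err = dmu_buf_hold_array_by_dnode(dn, offset, length, TRUE, FTAG,
	    &numbufs, &dbp, DMU_READ_PREFETCH);
	if (err == 0)
		dmu_buf_rele_array(dbp, numbufs, FTAG);

	dnode_rele(dn, FTAG);
	return (err);
}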
669
670 /*
671 * Issue prefetch i/os for the given blocks. If level is greater than 0, the
672 * indirect blocks prefetched will be those that point to the blocks containing
673 * the data starting at offset, and continuing to offset + len.
674 *
675 * Note that if the indirect blocks above the blocks being prefetched are not
676 * in cache, they will be asynchronously read in.
677 */
678 void
679 dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
680 uint64_t len, zio_priority_t pri)
681 {
682 dnode_t *dn;
683 uint64_t blkid;
684 int nblks, err;
685
686 if (len == 0) { /* they're interested in the bonus buffer */
687 dn = DMU_META_DNODE(os);
688
689 if (object == 0 || object >= DN_MAX_OBJECT)
690 return;
691
692 rw_enter(&dn->dn_struct_rwlock, RW_READER);
693 blkid = dbuf_whichblock(dn, level,
694 object * sizeof (dnode_phys_t));
695 dbuf_prefetch(dn, level, blkid, pri, 0);
696 rw_exit(&dn->dn_struct_rwlock);
697 return;
698 }
699
700 /*
701 * See comment before the definition of dmu_prefetch_max.
702 */
703 len = MIN(len, dmu_prefetch_max);
704
705 /*
706 * XXX - Note, if the dnode for the requested object is not
707 * already cached, we will do a *synchronous* read in the
708 * dnode_hold() call. The same is true for any indirects.
709 */
710 err = dnode_hold(os, object, FTAG, &dn);
711 if (err != 0)
712 return;
713
714 /*
715 * offset + len - 1 is the last byte we want to prefetch for, and offset
716 * is the first. Then dbuf_whichblock(dn, level, offset + len - 1) is the
717 * last block we want to prefetch, and dbuf_whichblock(dn, level,
718 * offset) is the first. Then the number we need to prefetch is the
719 * last - first + 1.
720 */
721 rw_enter(&dn->dn_struct_rwlock, RW_READER);
722 if (level > 0 || dn->dn_datablkshift != 0) {
723 nblks = dbuf_whichblock(dn, level, offset + len - 1) -
724 dbuf_whichblock(dn, level, offset) + 1;
725 } else {
726 nblks = (offset < dn->dn_datablksz);
727 }
728
729 if (nblks != 0) {
730 blkid = dbuf_whichblock(dn, level, offset);
731 for (int i = 0; i < nblks; i++)
732 dbuf_prefetch(dn, level, blkid + i, pri, 0);
733 }
734 rw_exit(&dn->dn_struct_rwlock);
735
736 dnode_rele(dn, FTAG);
737 }
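/*
 * Illustrative sketch (not part of the original source): issuing readahead
 * for level-0 data blocks ahead of a sequential consumer. The 1MB length is
 * an arbitrary example and is clamped internally to dmu_prefetch_max; the
 * helper name is hypothetical.
 */
static void
example_prefetch_ahead(objset_t *os, uint64_t object, uint64_t next_offset)
{
	dmu_prefetch(os, object, 0, next_offset, 1024 * 1024,
	    ZIO_PRIORITY_SYNC_READ);
}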
738
739 /*
740 * Get the next "chunk" of file data to free. We traverse the file from
741 * the end so that the file gets shorter over time (if we crash in the
742 * middle, this will leave us in a better state). We find allocated file
743 * data by simply searching the allocated level 1 indirects.
744 *
745 * On input, *start should be the first offset that does not need to be
746 * freed (e.g. "offset + length"). On return, *start will be the first
747 * offset that should be freed and l1blks is set to the number of level 1
748 * indirect blocks found within the chunk.
749 */
750 static int
751 get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum, uint64_t *l1blks)
752 {
753 uint64_t blks;
754 uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
755 /* bytes of data covered by a level-1 indirect block */
756 uint64_t iblkrange = (uint64_t)dn->dn_datablksz *
757 EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);
758
759 ASSERT3U(minimum, <=, *start);
760
761 /*
762 * Check if we can free the entire range assuming that all of the
763 * L1 blocks in this range have data. If we can, we use this
764 * worst case value as an estimate so we can avoid having to look
765 * at the object's actual data.
766 */
767 uint64_t total_l1blks =
768 (roundup(*start, iblkrange) - (minimum / iblkrange * iblkrange)) /
769 iblkrange;
770 if (total_l1blks <= maxblks) {
771 *l1blks = total_l1blks;
772 *start = minimum;
773 return (0);
774 }
775 ASSERT(ISP2(iblkrange));
776
777 for (blks = 0; *start > minimum && blks < maxblks; blks++) {
778 int err;
779
780 /*
781 * dnode_next_offset(BACKWARDS) will find an allocated L1
782 * indirect block at or before the input offset. We must
783 * decrement *start so that it is at the end of the region
784 * to search.
785 */
786 (*start)--;
787
788 err = dnode_next_offset(dn,
789 DNODE_FIND_BACKWARDS, start, 2, 1, 0);
790
791 /* if there are no indirect blocks before start, we are done */
792 if (err == ESRCH) {
793 *start = minimum;
794 break;
795 } else if (err != 0) {
796 *l1blks = blks;
797 return (err);
798 }
799
800 /* set start to the beginning of this L1 indirect */
801 *start = P2ALIGN(*start, iblkrange);
802 }
803 if (*start < minimum)
804 *start = minimum;
805 *l1blks = blks;
806
807 return (0);
808 }
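/*
 * Worked example for the sizing above (assuming a 128K data block size and
 * dn_indblkshift == 17, i.e. 128K indirect blocks): each L1 indirect holds
 * 1 << (17 - SPA_BLKPTRSHIFT) = 1024 block pointers, so iblkrange is
 * 128K * 1024 = 128M of file data. With DMU_MAX_ACCESS of 64M, maxblks is
 * 64M >> 18 = 256 L1 blocks, i.e. each chunk spans at most 32G of data.
 */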
809
810 /*
811 * If this objset is of type DMU_OST_ZFS, return true if the vfs's unmounted
812 * flag is set; otherwise return false.
813 * Used below in dmu_free_long_range_impl() to allow aborting when unmounting.
814 */
815 static boolean_t
816 dmu_objset_zfs_unmounting(objset_t *os)
817 {
818 #ifdef _KERNEL
819 if (dmu_objset_type(os) == DMU_OST_ZFS)
820 return (zfs_get_vfs_flag_unmounted(os));
821 #else
822 (void) os;
823 #endif
824 return (B_FALSE);
825 }
826
827 static int
828 dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
829 uint64_t length)
830 {
831 uint64_t object_size;
832 int err;
833 uint64_t dirty_frees_threshold;
834 dsl_pool_t *dp = dmu_objset_pool(os);
835
836 if (dn == NULL)
837 return (SET_ERROR(EINVAL));
838
839 object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
840 if (offset >= object_size)
841 return (0);
842
843 if (zfs_per_txg_dirty_frees_percent <= 100)
844 dirty_frees_threshold =
845 zfs_per_txg_dirty_frees_percent * zfs_dirty_data_max / 100;
846 else
847 dirty_frees_threshold = zfs_dirty_data_max / 20;
848
849 if (length == DMU_OBJECT_END || offset + length > object_size)
850 length = object_size - offset;
851
852 while (length != 0) {
853 uint64_t chunk_end, chunk_begin, chunk_len;
854 uint64_t l1blks;
855 dmu_tx_t *tx;
856
857 if (dmu_objset_zfs_unmounting(dn->dn_objset))
858 return (SET_ERROR(EINTR));
859
860 chunk_end = chunk_begin = offset + length;
861
862 /* move chunk_begin backwards to the beginning of this chunk */
863 err = get_next_chunk(dn, &chunk_begin, offset, &l1blks);
864 if (err)
865 return (err);
866 ASSERT3U(chunk_begin, >=, offset);
867 ASSERT3U(chunk_begin, <=, chunk_end);
868
869 chunk_len = chunk_end - chunk_begin;
870
871 tx = dmu_tx_create(os);
872 dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);
873
874 /*
875 * Mark this transaction as typically resulting in a net
876 * reduction in space used.
877 */
878 dmu_tx_mark_netfree(tx);
879 err = dmu_tx_assign(tx, TXG_WAIT);
880 if (err) {
881 dmu_tx_abort(tx);
882 return (err);
883 }
884
885 uint64_t txg = dmu_tx_get_txg(tx);
886
887 mutex_enter(&dp->dp_lock);
888 uint64_t long_free_dirty =
889 dp->dp_long_free_dirty_pertxg[txg & TXG_MASK];
890 mutex_exit(&dp->dp_lock);
891
892 /*
893 * To avoid filling up a TXG with just frees, wait for
894 * the next TXG to open before freeing more chunks if
895 * we have reached the threshold of frees.
896 */
897 if (dirty_frees_threshold != 0 &&
898 long_free_dirty >= dirty_frees_threshold) {
899 DMU_TX_STAT_BUMP(dmu_tx_dirty_frees_delay);
900 dmu_tx_commit(tx);
901 txg_wait_open(dp, 0, B_TRUE);
902 continue;
903 }
904
905 /*
906 * In order to prevent unnecessary write throttling, for each
907 * TXG, we track the cumulative size of L1 blocks being dirtied
908 * in dnode_free_range() below. We compare this number to a
909 * tunable threshold, past which we prevent new L1 dirty freeing
910 * blocks from being added into the open TXG. See
911 * dmu_free_long_range_impl() for details. The threshold
912 * prevents write throttle activation due to dirty freeing L1
913 * blocks taking up a large percentage of zfs_dirty_data_max.
914 */
915 mutex_enter(&dp->dp_lock);
916 dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] +=
917 l1blks << dn->dn_indblkshift;
918 mutex_exit(&dp->dp_lock);
919 DTRACE_PROBE3(free__long__range,
920 uint64_t, long_free_dirty, uint64_t, chunk_len,
921 uint64_t, txg);
922 dnode_free_range(dn, chunk_begin, chunk_len, tx);
923
924 dmu_tx_commit(tx);
925
926 length -= chunk_len;
927 }
928 return (0);
929 }
930
931 int
932 dmu_free_long_range(objset_t *os, uint64_t object,
933 uint64_t offset, uint64_t length)
934 {
935 dnode_t *dn;
936 int err;
937
938 err = dnode_hold(os, object, FTAG, &dn);
939 if (err != 0)
940 return (err);
941 err = dmu_free_long_range_impl(os, dn, offset, length);
942
943 /*
944 * It is important to zero out the maxblkid when freeing the entire
945 * file, so that (a) subsequent calls to dmu_free_long_range_impl()
946 * will take the fast path, and (b) dnode_reallocate() can verify
947 * that the entire file has been freed.
948 */
949 if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
950 dn->dn_maxblkid = 0;
951
952 dnode_rele(dn, FTAG);
953 return (err);
954 }
955
956 int
957 dmu_free_long_object(objset_t *os, uint64_t object)
958 {
959 dmu_tx_t *tx;
960 int err;
961
962 err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
963 if (err != 0)
964 return (err);
965
966 tx = dmu_tx_create(os);
967 dmu_tx_hold_bonus(tx, object);
968 dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
969 dmu_tx_mark_netfree(tx);
970 err = dmu_tx_assign(tx, TXG_WAIT);
971 if (err == 0) {
972 err = dmu_object_free(os, object, tx);
974
975 dmu_tx_commit(tx);
976 } else {
977 dmu_tx_abort(tx);
978 }
979
980 return (err);
981 }
982
983 int
984 dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
985 uint64_t size, dmu_tx_t *tx)
986 {
987 dnode_t *dn;
988 int err = dnode_hold(os, object, FTAG, &dn);
989 if (err)
990 return (err);
991 ASSERT(offset < UINT64_MAX);
992 ASSERT(size == DMU_OBJECT_END || size <= UINT64_MAX - offset);
993 dnode_free_range(dn, offset, size, tx);
994 dnode_rele(dn, FTAG);
995 return (0);
996 }
997
998 static int
999 dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
1000 void *buf, uint32_t flags)
1001 {
1002 dmu_buf_t **dbp;
1003 int numbufs, err = 0;
1004
1005 /*
1006 * Deal with odd block sizes, where there can't be data past the first
1007 * block. If we ever do the tail block optimization, we will need to
1008 * handle that here as well.
1009 */
1010 if (dn->dn_maxblkid == 0) {
1011 uint64_t newsz = offset > dn->dn_datablksz ? 0 :
1012 MIN(size, dn->dn_datablksz - offset);
1013 bzero((char *)buf + newsz, size - newsz);
1014 size = newsz;
1015 }
1016
1017 while (size > 0) {
1018 uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
1019 int i;
1020
1021 /*
1022 * NB: we could do this block-at-a-time, but it's nice
1023 * to be reading in parallel.
1024 */
1025 err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
1026 TRUE, FTAG, &numbufs, &dbp, flags);
1027 if (err)
1028 break;
1029
1030 for (i = 0; i < numbufs; i++) {
1031 uint64_t tocpy;
1032 int64_t bufoff;
1033 dmu_buf_t *db = dbp[i];
1034
1035 ASSERT(size > 0);
1036
1037 bufoff = offset - db->db_offset;
1038 tocpy = MIN(db->db_size - bufoff, size);
1039
1040 (void) memcpy(buf, (char *)db->db_data + bufoff, tocpy);
1041
1042 offset += tocpy;
1043 size -= tocpy;
1044 buf = (char *)buf + tocpy;
1045 }
1046 dmu_buf_rele_array(dbp, numbufs, FTAG);
1047 }
1048 return (err);
1049 }
1050
1051 int
1052 dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1053 void *buf, uint32_t flags)
1054 {
1055 dnode_t *dn;
1056 int err;
1057
1058 err = dnode_hold(os, object, FTAG, &dn);
1059 if (err != 0)
1060 return (err);
1061
1062 err = dmu_read_impl(dn, offset, size, buf, flags);
1063 dnode_rele(dn, FTAG);
1064 return (err);
1065 }
1066
1067 int
1068 dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
1069 uint32_t flags)
1070 {
1071 return (dmu_read_impl(dn, offset, size, buf, flags));
1072 }
1073
1074 static void
1075 dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size,
1076 const void *buf, dmu_tx_t *tx)
1077 {
1078 int i;
1079
1080 for (i = 0; i < numbufs; i++) {
1081 uint64_t tocpy;
1082 int64_t bufoff;
1083 dmu_buf_t *db = dbp[i];
1084
1085 ASSERT(size > 0);
1086
1087 bufoff = offset - db->db_offset;
1088 tocpy = MIN(db->db_size - bufoff, size);
1089
1090 ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
1091
1092 if (tocpy == db->db_size)
1093 dmu_buf_will_fill(db, tx);
1094 else
1095 dmu_buf_will_dirty(db, tx);
1096
1097 (void) memcpy((char *)db->db_data + bufoff, buf, tocpy);
1098
1099 if (tocpy == db->db_size)
1100 dmu_buf_fill_done(db, tx);
1101
1102 offset += tocpy;
1103 size -= tocpy;
1104 buf = (char *)buf + tocpy;
1105 }
1106 }
1107
1108 void
1109 dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1110 const void *buf, dmu_tx_t *tx)
1111 {
1112 dmu_buf_t **dbp;
1113 int numbufs;
1114
1115 if (size == 0)
1116 return;
1117
1118 VERIFY0(dmu_buf_hold_array(os, object, offset, size,
1119 FALSE, FTAG, &numbufs, &dbp));
1120 dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
1121 dmu_buf_rele_array(dbp, numbufs, FTAG);
1122 }
1123
1124 /*
1125 * Note: Lustre is an external consumer of this interface.
1126 */
1127 void
1128 dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
1129 const void *buf, dmu_tx_t *tx)
1130 {
1131 dmu_buf_t **dbp;
1132 int numbufs;
1133
1134 if (size == 0)
1135 return;
1136
1137 VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
1138 FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH));
1139 dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
1140 dmu_buf_rele_array(dbp, numbufs, FTAG);
1141 }
1142
1143 void
1144 dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1145 dmu_tx_t *tx)
1146 {
1147 dmu_buf_t **dbp;
1148 int numbufs, i;
1149
1150 if (size == 0)
1151 return;
1152
1153 VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
1154 FALSE, FTAG, &numbufs, &dbp));
1155
1156 for (i = 0; i < numbufs; i++) {
1157 dmu_buf_t *db = dbp[i];
1158
1159 dmu_buf_will_not_fill(db, tx);
1160 }
1161 dmu_buf_rele_array(dbp, numbufs, FTAG);
1162 }
1163
1164 void
1165 dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
1166 void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
1167 int compressed_size, int byteorder, dmu_tx_t *tx)
1168 {
1169 dmu_buf_t *db;
1170
1171 ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES);
1172 ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS);
1173 VERIFY0(dmu_buf_hold_noread(os, object, offset,
1174 FTAG, &db));
1175
1176 dmu_buf_write_embedded(db,
1177 data, (bp_embedded_type_t)etype, (enum zio_compress)comp,
1178 uncompressed_size, compressed_size, byteorder, tx);
1179
1180 dmu_buf_rele(db, FTAG);
1181 }
1182
1183 void
1184 dmu_redact(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
1185 dmu_tx_t *tx)
1186 {
1187 int numbufs, i;
1188 dmu_buf_t **dbp;
1189
1190 VERIFY0(dmu_buf_hold_array(os, object, offset, size, FALSE, FTAG,
1191 &numbufs, &dbp));
1192 for (i = 0; i < numbufs; i++)
1193 dmu_buf_redact(dbp[i], tx);
1194 dmu_buf_rele_array(dbp, numbufs, FTAG);
1195 }
1196
1197 #ifdef _KERNEL
1198 int
1199 dmu_read_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size)
1200 {
1201 dmu_buf_t **dbp;
1202 int numbufs, i, err;
1203
1204 /*
1205 * NB: we could do this block-at-a-time, but it's nice
1206 * to be reading in parallel.
1207 */
1208 err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
1209 TRUE, FTAG, &numbufs, &dbp, 0);
1210 if (err)
1211 return (err);
1212
1213 for (i = 0; i < numbufs; i++) {
1214 uint64_t tocpy;
1215 int64_t bufoff;
1216 dmu_buf_t *db = dbp[i];
1217
1218 ASSERT(size > 0);
1219
1220 bufoff = zfs_uio_offset(uio) - db->db_offset;
1221 tocpy = MIN(db->db_size - bufoff, size);
1222
1223 err = zfs_uio_fault_move((char *)db->db_data + bufoff, tocpy,
1224 UIO_READ, uio);
1225
1226 if (err)
1227 break;
1228
1229 size -= tocpy;
1230 }
1231 dmu_buf_rele_array(dbp, numbufs, FTAG);
1232
1233 return (err);
1234 }
1235
1236 /*
1237 * Read 'size' bytes into the uio buffer.
1238 * From object zdb->db_object.
1239 * Starting at zfs_uio_offset(uio).
1240 *
1241 * If the caller already has a dbuf in the target object
1242 * (e.g. its bonus buffer), this routine is faster than dmu_read_uio(),
1243 * because we don't have to find the dnode_t for the object.
1244 */
1245 int
1246 dmu_read_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size)
1247 {
1248 dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
1249 dnode_t *dn;
1250 int err;
1251
1252 if (size == 0)
1253 return (0);
1254
1255 DB_DNODE_ENTER(db);
1256 dn = DB_DNODE(db);
1257 err = dmu_read_uio_dnode(dn, uio, size);
1258 DB_DNODE_EXIT(db);
1259
1260 return (err);
1261 }
1262
1263 /*
1264 * Read 'size' bytes into the uio buffer.
1265 * From the specified object
1266 * Starting at offset zfs_uio_offset(uio).
1267 */
1268 int
1269 dmu_read_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size)
1270 {
1271 dnode_t *dn;
1272 int err;
1273
1274 if (size == 0)
1275 return (0);
1276
1277 err = dnode_hold(os, object, FTAG, &dn);
1278 if (err)
1279 return (err);
1280
1281 err = dmu_read_uio_dnode(dn, uio, size);
1282
1283 dnode_rele(dn, FTAG);
1284
1285 return (err);
1286 }
1287
1288 int
1289 dmu_write_uio_dnode(dnode_t *dn, zfs_uio_t *uio, uint64_t size, dmu_tx_t *tx)
1290 {
1291 dmu_buf_t **dbp;
1292 int numbufs;
1293 int err = 0;
1294 int i;
1295
1296 err = dmu_buf_hold_array_by_dnode(dn, zfs_uio_offset(uio), size,
1297 FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
1298 if (err)
1299 return (err);
1300
1301 for (i = 0; i < numbufs; i++) {
1302 uint64_t tocpy;
1303 int64_t bufoff;
1304 dmu_buf_t *db = dbp[i];
1305
1306 ASSERT(size > 0);
1307
1308 bufoff = zfs_uio_offset(uio) - db->db_offset;
1309 tocpy = MIN(db->db_size - bufoff, size);
1310
1311 ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
1312
1313 if (tocpy == db->db_size)
1314 dmu_buf_will_fill(db, tx);
1315 else
1316 dmu_buf_will_dirty(db, tx);
1317
1318 /*
1319 * XXX zfs_uiomove could block forever (e.g. nfs-backed
1320 * pages). There needs to be a uiolockdown() function
1321 * to lock the pages in memory, so that zfs_uiomove won't
1322 * block.
1323 */
1324 err = zfs_uio_fault_move((char *)db->db_data + bufoff,
1325 tocpy, UIO_WRITE, uio);
1326
1327 if (tocpy == db->db_size)
1328 dmu_buf_fill_done(db, tx);
1329
1330 if (err)
1331 break;
1332
1333 size -= tocpy;
1334 }
1335
1336 dmu_buf_rele_array(dbp, numbufs, FTAG);
1337 return (err);
1338 }
1339
1340 /*
1341 * Write 'size' bytes from the uio buffer.
1342 * To object zdb->db_object.
1343 * Starting at offset zfs_uio_offset(uio).
1344 *
1345 * If the caller already has a dbuf in the target object
1346 * (e.g. its bonus buffer), this routine is faster than dmu_write_uio(),
1347 * because we don't have to find the dnode_t for the object.
1348 */
1349 int
1350 dmu_write_uio_dbuf(dmu_buf_t *zdb, zfs_uio_t *uio, uint64_t size,
1351 dmu_tx_t *tx)
1352 {
1353 dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
1354 dnode_t *dn;
1355 int err;
1356
1357 if (size == 0)
1358 return (0);
1359
1360 DB_DNODE_ENTER(db);
1361 dn = DB_DNODE(db);
1362 err = dmu_write_uio_dnode(dn, uio, size, tx);
1363 DB_DNODE_EXIT(db);
1364
1365 return (err);
1366 }
1367
1368 /*
1369 * Write 'size' bytes from the uio buffer.
1370 * To the specified object.
1371 * Starting at offset zfs_uio_offset(uio).
1372 */
1373 int
1374 dmu_write_uio(objset_t *os, uint64_t object, zfs_uio_t *uio, uint64_t size,
1375 dmu_tx_t *tx)
1376 {
1377 dnode_t *dn;
1378 int err;
1379
1380 if (size == 0)
1381 return (0);
1382
1383 err = dnode_hold(os, object, FTAG, &dn);
1384 if (err)
1385 return (err);
1386
1387 err = dmu_write_uio_dnode(dn, uio, size, tx);
1388
1389 dnode_rele(dn, FTAG);
1390
1391 return (err);
1392 }
1393 #endif /* _KERNEL */
1394
1395 /*
1396 * Allocate a loaned anonymous arc buffer.
1397 */
1398 arc_buf_t *
1399 dmu_request_arcbuf(dmu_buf_t *handle, int size)
1400 {
1401 dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;
1402
1403 return (arc_loan_buf(db->db_objset->os_spa, B_FALSE, size));
1404 }
1405
1406 /*
1407 * Free a loaned arc buffer.
1408 */
1409 void
1410 dmu_return_arcbuf(arc_buf_t *buf)
1411 {
1412 arc_return_buf(buf, FTAG);
1413 arc_buf_destroy(buf, FTAG);
1414 }
1415
1416 /*
1417 * A "lightweight" write is faster than a regular write (e.g.
1418 * dmu_write_by_dnode() or dmu_assign_arcbuf_by_dnode()), because it avoids the
1419 * CPU cost of creating a dmu_buf_impl_t and arc_buf_[hdr_]_t. However, the
1420 * data can not be read or overwritten until the transaction's txg has been
1421 * synced. This makes it appropriate for workloads that are known to be
1422 * (temporarily) write-only, like "zfs receive".
1423 *
1424 * A single block is written, starting at the specified offset in bytes. If
1425 * the call is successful, it returns 0 and the provided abd has been
1426 * consumed (the caller should not free it).
1427 */
1428 int
1429 dmu_lightweight_write_by_dnode(dnode_t *dn, uint64_t offset, abd_t *abd,
1430 const zio_prop_t *zp, enum zio_flag flags, dmu_tx_t *tx)
1431 {
1432 dbuf_dirty_record_t *dr =
1433 dbuf_dirty_lightweight(dn, dbuf_whichblock(dn, 0, offset), tx);
1434 if (dr == NULL)
1435 return (SET_ERROR(EIO));
1436 dr->dt.dll.dr_abd = abd;
1437 dr->dt.dll.dr_props = *zp;
1438 dr->dt.dll.dr_flags = flags;
1439 return (0);
1440 }
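/*
 * Illustrative sketch (not part of the original source): a minimal
 * lightweight write. The caller is assumed to hold the dnode and to have
 * reserved this range in 'tx' (e.g. via dmu_tx_hold_write_by_dnode());
 * write policy is generated as for a regular write. The helper name is
 * hypothetical.
 */
static int
example_lightweight_write(dnode_t *dn, uint64_t offset, abd_t *abd,
    dmu_tx_t *tx)
{
	zio_prop_t zp;

	dmu_write_policy(dn->dn_objset, dn, 0, 0, &zp);
	/* On success the abd is consumed; on failure the caller keeps it. */
	return (dmu_lightweight_write_by_dnode(dn, offset, abd, &zp, 0, tx));
}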
1441
1442 /*
1443 * When possible, directly assign the passed loaned arc buffer to a dbuf.
1444 * If this is not possible, copy the contents of the passed arc buf via
1445 * dmu_write().
1446 */
1447 int
1448 dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
1449 dmu_tx_t *tx)
1450 {
1451 dmu_buf_impl_t *db;
1452 objset_t *os = dn->dn_objset;
1453 uint64_t object = dn->dn_object;
1454 uint32_t blksz = (uint32_t)arc_buf_lsize(buf);
1455 uint64_t blkid;
1456
1457 rw_enter(&dn->dn_struct_rwlock, RW_READER);
1458 blkid = dbuf_whichblock(dn, 0, offset);
1459 db = dbuf_hold(dn, blkid, FTAG);
1460 rw_exit(&dn->dn_struct_rwlock);
1461 if (db == NULL)
1462 return (SET_ERROR(EIO));
1463
1464 /*
1465 * We can only assign if the offset is aligned and the arc buf is the
1466 * same size as the dbuf.
1467 */
1468 if (offset == db->db.db_offset && blksz == db->db.db_size) {
1469 zfs_racct_write(blksz, 1);
1470 dbuf_assign_arcbuf(db, buf, tx);
1471 dbuf_rele(db, FTAG);
1472 } else {
1473 /* compressed bufs must always be assignable to their dbuf */
1474 ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF);
1475 ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED));
1476
1477 dbuf_rele(db, FTAG);
1478 dmu_write(os, object, offset, blksz, buf->b_data, tx);
1479 dmu_return_arcbuf(buf);
1480 }
1481
1482 return (0);
1483 }
1484
1485 int
1486 dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
1487 dmu_tx_t *tx)
1488 {
1489 int err;
1490 dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
1491
1492 DB_DNODE_ENTER(dbuf);
1493 err = dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf), offset, buf, tx);
1494 DB_DNODE_EXIT(dbuf);
1495
1496 return (err);
1497 }
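/*
 * Illustrative sketch (not part of the original source): the loaned
 * arc-buffer write path. A buffer is borrowed, filled by the caller, and
 * then donated to the dbuf (zero-copy) or, on size/offset mismatch, copied
 * and returned by dmu_assign_arcbuf_by_dbuf() itself. The helper name is
 * hypothetical.
 */
static int
example_loaned_arcbuf_write(dmu_buf_t *handle, uint64_t offset, int size,
    const void *src, dmu_tx_t *tx)
{
	arc_buf_t *buf = dmu_request_arcbuf(handle, size);
	int err;

	bcopy(src, buf->b_data, size);

	err = dmu_assign_arcbuf_by_dbuf(handle, offset, buf, tx);
	if (err != 0)
		dmu_return_arcbuf(buf);	/* not consumed on error */
	return (err);
}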
1498
1499 typedef struct {
1500 dbuf_dirty_record_t *dsa_dr;
1501 dmu_sync_cb_t *dsa_done;
1502 zgd_t *dsa_zgd;
1503 dmu_tx_t *dsa_tx;
1504 } dmu_sync_arg_t;
1505
1506 static void
1507 dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
1508 {
1509 (void) buf;
1510 dmu_sync_arg_t *dsa = varg;
1511 dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
1512 blkptr_t *bp = zio->io_bp;
1513
1514 if (zio->io_error == 0) {
1515 if (BP_IS_HOLE(bp)) {
1516 /*
1517 * A block of zeros may compress to a hole, but the
1518 * block size still needs to be known for replay.
1519 */
1520 BP_SET_LSIZE(bp, db->db_size);
1521 } else if (!BP_IS_EMBEDDED(bp)) {
1522 ASSERT(BP_GET_LEVEL(bp) == 0);
1523 BP_SET_FILL(bp, 1);
1524 }
1525 }
1526 }
1527
1528 static void
1529 dmu_sync_late_arrival_ready(zio_t *zio)
1530 {
1531 dmu_sync_ready(zio, NULL, zio->io_private);
1532 }
1533
1534 static void
1535 dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
1536 {
1537 (void) buf;
1538 dmu_sync_arg_t *dsa = varg;
1539 dbuf_dirty_record_t *dr = dsa->dsa_dr;
1540 dmu_buf_impl_t *db = dr->dr_dbuf;
1541 zgd_t *zgd = dsa->dsa_zgd;
1542
1543 /*
1544 * Record the vdev(s) backing this blkptr so they can be flushed after
1545 * the writes for the lwb have completed.
1546 */
1547 if (zio->io_error == 0) {
1548 zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
1549 }
1550
1551 mutex_enter(&db->db_mtx);
1552 ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
1553 if (zio->io_error == 0) {
1554 dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE);
1555 if (dr->dt.dl.dr_nopwrite) {
1556 blkptr_t *bp = zio->io_bp;
1557 blkptr_t *bp_orig = &zio->io_bp_orig;
1558 uint8_t chksum = BP_GET_CHECKSUM(bp_orig);
1559
1560 ASSERT(BP_EQUAL(bp, bp_orig));
1561 VERIFY(BP_EQUAL(bp, db->db_blkptr));
1562 ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF);
1563 VERIFY(zio_checksum_table[chksum].ci_flags &
1564 ZCHECKSUM_FLAG_NOPWRITE);
1565 }
1566 dr->dt.dl.dr_overridden_by = *zio->io_bp;
1567 dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
1568 dr->dt.dl.dr_copies = zio->io_prop.zp_copies;
1569
1570 /*
1571 * Old style holes are filled with all zeros, whereas
1572 * new-style holes maintain their lsize, type, level,
1573 * and birth time (see zio_write_compress). While we
1574 * need to reset the BP_SET_LSIZE() call that happened
1575 * in dmu_sync_ready for old style holes, we do *not*
1576 * want to wipe out the information contained in new
1577 * style holes. Thus, only zero out the block pointer if
1578 * it's an old style hole.
1579 */
1580 if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) &&
1581 dr->dt.dl.dr_overridden_by.blk_birth == 0)
1582 BP_ZERO(&dr->dt.dl.dr_overridden_by);
1583 } else {
1584 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1585 }
1586 cv_broadcast(&db->db_changed);
1587 mutex_exit(&db->db_mtx);
1588
1589 dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
1590
1591 kmem_free(dsa, sizeof (*dsa));
1592 }
1593
1594 static void
1595 dmu_sync_late_arrival_done(zio_t *zio)
1596 {
1597 blkptr_t *bp = zio->io_bp;
1598 dmu_sync_arg_t *dsa = zio->io_private;
1599 zgd_t *zgd = dsa->dsa_zgd;
1600
1601 if (zio->io_error == 0) {
1602 /*
1603 * Record the vdev(s) backing this blkptr so they can be
1604 * flushed after the writes for the lwb have completed.
1605 */
1606 zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
1607
1608 if (!BP_IS_HOLE(bp)) {
1609 blkptr_t *bp_orig __maybe_unused = &zio->io_bp_orig;
1610 ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE));
1611 ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
1612 ASSERT(zio->io_bp->blk_birth == zio->io_txg);
1613 ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
1614 zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
1615 }
1616 }
1617
1618 dmu_tx_commit(dsa->dsa_tx);
1619
1620 dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
1621
1622 abd_free(zio->io_abd);
1623 kmem_free(dsa, sizeof (*dsa));
1624 }
1625
1626 static int
1627 dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
1628 zio_prop_t *zp, zbookmark_phys_t *zb)
1629 {
1630 dmu_sync_arg_t *dsa;
1631 dmu_tx_t *tx;
1632
1633 tx = dmu_tx_create(os);
1634 dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
1635 if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
1636 dmu_tx_abort(tx);
1637 /* Make zl_get_data do txg_wait_synced() */
1638 return (SET_ERROR(EIO));
1639 }
1640
1641 /*
1642 * In order to prevent the zgd's lwb from being free'd prior to
1643 * dmu_sync_late_arrival_done() being called, we have to ensure
1644 * the lwb's "max txg" takes this tx's txg into account.
1645 */
1646 zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx));
1647
1648 dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
1649 dsa->dsa_dr = NULL;
1650 dsa->dsa_done = done;
1651 dsa->dsa_zgd = zgd;
1652 dsa->dsa_tx = tx;
1653
1654 /*
1655 * Since we are currently syncing this txg, it's nontrivial to
1656 * determine what BP to nopwrite against, so we disable nopwrite.
1657 *
1658 * When syncing, the db_blkptr is initially the BP of the previous
1659 * txg. We can not nopwrite against it because it will be changed
1660 * (this is similar to the non-late-arrival case where the dbuf is
1661 * dirty in a future txg).
1662 *
1663 * Then dbuf_write_ready() sets db_blkptr to the location we will write.
1664 * We can not nopwrite against it because although the BP will not
1665 * (typically) be changed, the data has not yet been persisted to this
1666 * location.
1667 *
1668 * Finally, when dbuf_write_done() is called, it is theoretically
1669 * possible to always nopwrite, because the data that was written in
1670 * this txg is the same data that we are trying to write. However we
1671 * would need to check that this dbuf is not dirty in any future
1672 * txg's (as we do in the normal dmu_sync() path). For simplicity, we
1673 * don't nopwrite in this case.
1674 */
1675 zp->zp_nopwrite = B_FALSE;
1676
1677 zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
1678 abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size),
1679 zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp,
1680 dmu_sync_late_arrival_ready, NULL, NULL, dmu_sync_late_arrival_done,
1681 dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb));
1682
1683 return (0);
1684 }
1685
1686 /*
1687 * Intent log support: sync the block associated with db to disk.
1688 * N.B. and XXX: the caller is responsible for making sure that the
1689 * data isn't changing while dmu_sync() is writing it.
1690 *
1691 * Return values:
1692 *
1693 * EEXIST: this txg has already been synced, so there's nothing to do.
1694 * The caller should not log the write.
1695 *
1696 * ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
1697 * The caller should not log the write.
1698 *
1699 * EALREADY: this block is already in the process of being synced.
1700 * The caller should track its progress (somehow).
1701 *
1702 * EIO: could not do the I/O.
1703 * The caller should do a txg_wait_synced().
1704 *
1705 * 0: the I/O has been initiated.
1706 * The caller should log this blkptr in the done callback.
1707 * It is possible that the I/O will fail, in which case
1708 * the error will be reported to the done callback and
1709 * propagated to pio from zio_done().
1710 */
1711 int
1712 dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
1713 {
1714 dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
1715 objset_t *os = db->db_objset;
1716 dsl_dataset_t *ds = os->os_dsl_dataset;
1717 dbuf_dirty_record_t *dr, *dr_next;
1718 dmu_sync_arg_t *dsa;
1719 zbookmark_phys_t zb;
1720 zio_prop_t zp;
1721 dnode_t *dn;
1722
1723 ASSERT(pio != NULL);
1724 ASSERT(txg != 0);
1725
1726 SET_BOOKMARK(&zb, ds->ds_object,
1727 db->db.db_object, db->db_level, db->db_blkid);
1728
1729 DB_DNODE_ENTER(db);
1730 dn = DB_DNODE(db);
1731 dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp);
1732 DB_DNODE_EXIT(db);
1733
1734 /*
1735 * If we're frozen (running ziltest), we always need to generate a bp.
1736 */
1737 if (txg > spa_freeze_txg(os->os_spa))
1738 return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
1739
1740 /*
1741 * Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
1742 * and us. If we determine that this txg is not yet syncing,
1743 * but it begins to sync a moment later, that's OK because the
1744 * sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
1745 */
1746 mutex_enter(&db->db_mtx);
1747
1748 if (txg <= spa_last_synced_txg(os->os_spa)) {
1749 /*
1750 * This txg has already synced. There's nothing to do.
1751 */
1752 mutex_exit(&db->db_mtx);
1753 return (SET_ERROR(EEXIST));
1754 }
1755
1756 if (txg <= spa_syncing_txg(os->os_spa)) {
1757 /*
1758 * This txg is currently syncing, so we can't mess with
1759 * the dirty record anymore; just write a new log block.
1760 */
1761 mutex_exit(&db->db_mtx);
1762 return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
1763 }
1764
1765 dr = dbuf_find_dirty_eq(db, txg);
1766
1767 if (dr == NULL) {
1768 /*
1769 * There's no dr for this dbuf, so it must have been freed.
1770 * There's no need to log writes to freed blocks, so we're done.
1771 */
1772 mutex_exit(&db->db_mtx);
1773 return (SET_ERROR(ENOENT));
1774 }
1775
1776 dr_next = list_next(&db->db_dirty_records, dr);
1777 ASSERT(dr_next == NULL || dr_next->dr_txg < txg);
1778
1779 if (db->db_blkptr != NULL) {
1780 /*
1781 * We need to fill in zgd_bp with the current blkptr so that
1782 * the nopwrite code can check if we're writing the same
1783 * data that's already on disk. We can only nopwrite if we
1784 * are sure that after making the copy, db_blkptr will not
1785 * change until our i/o completes. We ensure this by
1786 * holding the db_mtx, and only allowing nopwrite if the
1787 * block is not already dirty (see below). This is verified
1788 * by dmu_sync_done(), which VERIFYs that the db_blkptr has
1789 * not changed.
1790 */
1791 *zgd->zgd_bp = *db->db_blkptr;
1792 }
1793
1794 /*
1795 * Assume the on-disk data is X, the current syncing data (in
1796 * txg - 1) is Y, and the current in-memory data is Z (currently
1797 * in dmu_sync).
1798 *
1799 * We usually want to perform a nopwrite if X and Z are the
1800 * same. However, if Y is different (i.e. the BP is going to
1801 * change before this write takes effect), then a nopwrite will
1802 * be incorrect - we would override with X, which could have
1803 * been freed when Y was written.
1804 *
1805 * (Note that this is not a concern when we are nop-writing from
1806 * syncing context, because X and Y must be identical, because
1807 * all previous txgs have been synced.)
1808 *
1809 * Therefore, we disable nopwrite if the current BP could change
1810 * before this TXG. There are two ways it could change: by
1811 * being dirty (dr_next is non-NULL), or by being freed
1812 * (dnode_block_freed()). This behavior is verified by
1813 * zio_done(), which VERIFYs that the override BP is identical
1814 * to the on-disk BP.
1815 */
1816 DB_DNODE_ENTER(db);
1817 dn = DB_DNODE(db);
1818 if (dr_next != NULL || dnode_block_freed(dn, db->db_blkid))
1819 zp.zp_nopwrite = B_FALSE;
1820 DB_DNODE_EXIT(db);
1821
1822 ASSERT(dr->dr_txg == txg);
1823 if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC ||
1824 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
1825 /*
1826 * We have already issued a sync write for this buffer,
1827 * or this buffer has already been synced. It could not
1828 * have been dirtied since, or we would have cleared the state.
1829 */
1830 mutex_exit(&db->db_mtx);
1831 return (SET_ERROR(EALREADY));
1832 }
1833
1834 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
1835 dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
1836 mutex_exit(&db->db_mtx);
1837
1838 dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
1839 dsa->dsa_dr = dr;
1840 dsa->dsa_done = done;
1841 dsa->dsa_zgd = zgd;
1842 dsa->dsa_tx = NULL;
1843
1844 zio_nowait(arc_write(pio, os->os_spa, txg,
1845 zgd->zgd_bp, dr->dt.dl.dr_data, DBUF_IS_L2CACHEABLE(db),
1846 &zp, dmu_sync_ready, NULL, NULL, dmu_sync_done, dsa,
1847 ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb));
1848
1849 return (0);
1850 }
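
/*
 * A minimal, hypothetical sketch of how a ZIL get-data callback might
 * dispatch on the return codes documented above.  The real consumer is
 * zfs_get_data() in the ZPL; example_done() and example_log_write()
 * are made-up names, and the block is compiled out.
 */
#if 0
static void
example_done(zgd_t *zgd, int error)
{
	if (error == 0) {
		/* The newly written bp is in *zgd->zgd_bp; log it here. */
	}
}

static int
example_log_write(zio_t *pio, uint64_t txg, zgd_t *zgd)
{
	int error = dmu_sync(pio, txg, example_done, zgd);

	if (error == EEXIST) {
		/* This txg already synced; don't log the write. */
	} else if (error == EALREADY) {
		/* A sync write for this block is already in flight. */
	} else if (error == ENOENT) {
		/* The block was freed; there's nothing to log. */
	} else if (error != 0) {
		/* e.g. EIO: the caller should txg_wait_synced(). */
	}
	return (error);
}
#endif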

int
dmu_object_set_nlevels(objset_t *os, uint64_t object, int nlevels, dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	err = dnode_set_nlevels(dn, nlevels, tx);
	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	err = dnode_set_blksz(dn, size, ibs, tx);
	dnode_rele(dn, FTAG);
	return (err);
}

int
dmu_object_set_maxblkid(objset_t *os, uint64_t object, uint64_t maxblkid,
    dmu_tx_t *tx)
{
	dnode_t *dn;
	int err;

	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);
	rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
	dnode_new_blkid(dn, maxblkid, tx, B_FALSE, B_TRUE);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	return (0);
}

void
dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	/*
	 * Send streams include each object's checksum function.  This
	 * check ensures that the receiving system can understand the
	 * checksum function transmitted.
	 */
	ASSERT3U(checksum, <, ZIO_CHECKSUM_LEGACY_FUNCTIONS);

	VERIFY0(dnode_hold(os, object, FTAG, &dn));
	ASSERT3U(checksum, <, ZIO_CHECKSUM_FUNCTIONS);
	dn->dn_checksum = checksum;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);
}

void
dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
    dmu_tx_t *tx)
{
	dnode_t *dn;

	/*
	 * Send streams include each object's compression function.  This
	 * check ensures that the receiving system can understand the
	 * compression function transmitted.
	 */
	ASSERT3U(compress, <, ZIO_COMPRESS_LEGACY_FUNCTIONS);

	VERIFY0(dnode_hold(os, object, FTAG, &dn));
	dn->dn_compress = compress;
	dnode_setdirty(dn, tx);
	dnode_rele(dn, FTAG);
}

/*
 * When the "redundant_metadata" property is set to "most", only indirect
 * blocks of this level and higher will have an additional ditto block.
 */
int zfs_redundant_metadata_most_ditto_level = 2;
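
/*
 * For example, with the default value of 2 and redundant_metadata=most,
 * a file's level-2 and higher indirect blocks receive an extra DVA in
 * dmu_write_policy() below, while its level-1 indirects do not.
 */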

void
dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
{
	dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
	boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) ||
	    (wp & WP_SPILL));
	enum zio_checksum checksum = os->os_checksum;
	enum zio_compress compress = os->os_compress;
	uint8_t complevel = os->os_complevel;
	enum zio_checksum dedup_checksum = os->os_dedup_checksum;
	boolean_t dedup = B_FALSE;
	boolean_t nopwrite = B_FALSE;
	boolean_t dedup_verify = os->os_dedup_verify;
	boolean_t encrypt = B_FALSE;
	int copies = os->os_copies;

	/*
	 * We maintain different write policies for each of the following
	 * types of data:
	 *	1. metadata
	 *	2. preallocated blocks (i.e. level-0 blocks of a dump device)
	 *	3. all other level 0 blocks
	 */
	if (ismd) {
		/*
		 * XXX -- we should design a compression algorithm
		 * that specializes in arrays of bps.
		 */
		compress = zio_compress_select(os->os_spa,
		    ZIO_COMPRESS_ON, ZIO_COMPRESS_ON);

		/*
		 * Metadata always gets checksummed.  If the data
		 * checksum is multi-bit correctable, and it's not a
		 * ZBT-style checksum, then it's suitable for metadata
		 * as well.  Otherwise, the metadata checksum defaults
		 * to fletcher4.
		 */
		if (!(zio_checksum_table[checksum].ci_flags &
		    ZCHECKSUM_FLAG_METADATA) ||
		    (zio_checksum_table[checksum].ci_flags &
		    ZCHECKSUM_FLAG_EMBEDDED))
			checksum = ZIO_CHECKSUM_FLETCHER_4;

		if (os->os_redundant_metadata == ZFS_REDUNDANT_METADATA_ALL ||
		    (os->os_redundant_metadata ==
		    ZFS_REDUNDANT_METADATA_MOST &&
		    (level >= zfs_redundant_metadata_most_ditto_level ||
		    DMU_OT_IS_METADATA(type) || (wp & WP_SPILL))))
			copies++;
	} else if (wp & WP_NOFILL) {
		ASSERT(level == 0);

		/*
		 * If we're writing preallocated blocks, we aren't actually
		 * writing them, so don't set any policy properties.  These
		 * blocks are currently only used by an external subsystem
		 * outside of zfs (i.e. dump) and not written by the zio
		 * pipeline.
		 */
		compress = ZIO_COMPRESS_OFF;
		checksum = ZIO_CHECKSUM_OFF;
	} else {
		compress = zio_compress_select(os->os_spa, dn->dn_compress,
		    compress);
		complevel = zio_complevel_select(os->os_spa, compress,
		    complevel, complevel);

		checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ?
		    zio_checksum_select(dn->dn_checksum, checksum) :
		    dedup_checksum;

		/*
		 * Determine dedup setting.  If we are in dmu_sync(),
		 * we won't actually dedup now because that's all
		 * done in syncing context; but we do want to use the
		 * dedup checksum.  If the checksum is not strong
		 * enough to ensure unique signatures, force
		 * dedup_verify.
		 */
		if (dedup_checksum != ZIO_CHECKSUM_OFF) {
			dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE;
			if (!(zio_checksum_table[checksum].ci_flags &
			    ZCHECKSUM_FLAG_DEDUP))
				dedup_verify = B_TRUE;
		}

		/*
		 * Enable nopwrite if we have a secure enough checksum
		 * algorithm (see comment in zio_nop_write) and
		 * compression is enabled.  We don't enable nopwrite if
		 * dedup is enabled, as the two features are mutually
		 * exclusive.
		 */
		nopwrite = (!dedup && (zio_checksum_table[checksum].ci_flags &
		    ZCHECKSUM_FLAG_NOPWRITE) &&
		    compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled);
	}

	/*
	 * All objects in an encrypted objset are protected from modification
	 * via a MAC.  Encrypted objects store their IV and salt in the last
	 * DVA in the bp, so we cannot use all copies.  Encrypted objects are
	 * also not subject to nopwrite, since writing the same data will
	 * still result in a new ciphertext.  Only encrypted blocks can be
	 * dedup'd, to avoid ambiguity in the dedup code, since the DDT does
	 * not store object types.
	 */
	if (os->os_encrypted && (wp & WP_NOFILL) == 0) {
		encrypt = B_TRUE;

		if (DMU_OT_IS_ENCRYPTED(type)) {
			copies = MIN(copies, SPA_DVAS_PER_BP - 1);
			nopwrite = B_FALSE;
		} else {
			dedup = B_FALSE;
		}

		if (level <= 0 &&
		    (type == DMU_OT_DNODE || type == DMU_OT_OBJSET)) {
			compress = ZIO_COMPRESS_EMPTY;
		}
	}

	zp->zp_compress = compress;
	zp->zp_complevel = complevel;
	zp->zp_checksum = checksum;
	zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
	zp->zp_level = level;
	zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa));
	zp->zp_dedup = dedup;
	zp->zp_dedup_verify = dedup && dedup_verify;
	zp->zp_nopwrite = nopwrite;
	zp->zp_encrypt = encrypt;
	zp->zp_byteorder = ZFS_HOST_BYTEORDER;
	bzero(zp->zp_salt, ZIO_DATA_SALT_LEN);
	bzero(zp->zp_iv, ZIO_DATA_IV_LEN);
	bzero(zp->zp_mac, ZIO_DATA_MAC_LEN);
	zp->zp_zpl_smallblk = DMU_OT_IS_FILE(zp->zp_type) ?
	    os->os_zpl_special_smallblock : 0;

	ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_INHERIT);
}
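
/*
 * A hypothetical illustration (compiled out) of the three policy
 * classes above: the same dnode yields different zio_prop_t settings
 * depending on the block level and wp flags.  example_policies() is a
 * made-up name.
 */
#if 0
static void
example_policies(objset_t *os, dnode_t *dn)
{
	zio_prop_t data_zp, meta_zp, spill_zp;

	/* Plain level-0 data block: inherits the dataset's properties. */
	dmu_write_policy(os, dn, 0, 0, &data_zp);

	/* Indirect block (level > 0): treated as metadata. */
	dmu_write_policy(os, dn, 1, 0, &meta_zp);

	/* Spill block: WP_SPILL also forces the metadata policy. */
	dmu_write_policy(os, dn, 0, WP_SPILL, &spill_zp);
}
#endif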

/*
 * This function is only called from zfs_holey_common() for zpl_llseek()
 * in order to determine the location of holes.  In order to accurately
 * report holes, all dirty data must be synced to disk.  This causes
 * extremely poor performance when seeking for holes in a dirty file.
 * As a compromise, only provide hole data when the dnode is clean.
 * When a dnode is dirty, report the dnode as having no holes, which
 * is always a safe thing to do.
 */
int
dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
{
	dnode_t *dn;
	int err;

restart:
	err = dnode_hold(os, object, FTAG, &dn);
	if (err)
		return (err);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);

	if (dnode_is_dirty(dn)) {
		/*
		 * If the zfs_dmu_offset_next_sync module option is enabled
		 * then strict hole reporting has been requested.  Dirty
		 * dnodes must be synced to disk to accurately report all
		 * holes.  When disabled (the default), dirty dnodes are
		 * reported to not have any holes, which is always safe.
		 *
		 * When called by zfs_holey_common() the zp->z_rangelock
		 * is held to prevent zfs_write() and mmap writeback from
		 * re-dirtying the dnode after txg_wait_synced().
		 */
		if (zfs_dmu_offset_next_sync) {
			rw_exit(&dn->dn_struct_rwlock);
			dnode_rele(dn, FTAG);
			txg_wait_synced(dmu_objset_pool(os), 0);
			goto restart;
		}

		err = SET_ERROR(EBUSY);
	} else {
		err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK |
		    (hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
	}

	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);

	return (err);
}
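
/*
 * A hypothetical SEEK_HOLE-style wrapper (compiled out), loosely
 * modeled on the real consumer zfs_holey_common(); example_seek_hole()
 * and filesz are made-up names.
 */
#if 0
static int
example_seek_hole(objset_t *os, uint64_t object, uint64_t filesz,
    uint64_t *off)
{
	int error = dmu_offset_next(os, object, B_TRUE, off);

	if (error == EBUSY) {
		/*
		 * The dnode is dirty and strict reporting is disabled;
		 * safely pretend the only hole is the one at EOF.
		 */
		*off = filesz;
		return (0);
	}
	if (error == 0 && *off > filesz) {
		/*
		 * dmu_offset_next() works on whole blocks, so clamp the
		 * "virtual hole" after the last block to the logical EOF.
		 */
		*off = filesz;
	}
	return (error);
}
#endif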

void
__dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
	dnode_phys_t *dnp = dn->dn_phys;

	doi->doi_data_block_size = dn->dn_datablksz;
	doi->doi_metadata_block_size = dn->dn_indblkshift ?
	    1ULL << dn->dn_indblkshift : 0;
	doi->doi_type = dn->dn_type;
	doi->doi_bonus_type = dn->dn_bonustype;
	doi->doi_bonus_size = dn->dn_bonuslen;
	doi->doi_dnodesize = dn->dn_num_slots << DNODE_SHIFT;
	doi->doi_indirection = dn->dn_nlevels;
	doi->doi_checksum = dn->dn_checksum;
	doi->doi_compress = dn->dn_compress;
	doi->doi_nblkptr = dn->dn_nblkptr;
	doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
	doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
	doi->doi_fill_count = 0;
	for (int i = 0; i < dnp->dn_nblkptr; i++)
		doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]);
}

void
dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	mutex_enter(&dn->dn_mtx);

	__dmu_object_info_from_dnode(dn, doi);

	mutex_exit(&dn->dn_mtx);
	rw_exit(&dn->dn_struct_rwlock);
}

/*
 * Get information on a DMU object.
 * If doi is NULL, just indicates whether the object exists.
 */
int
dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
{
	dnode_t *dn;
	int err = dnode_hold(os, object, FTAG, &dn);

	if (err)
		return (err);

	if (doi != NULL)
		dmu_object_info_from_dnode(dn, doi);

	dnode_rele(dn, FTAG);
	return (0);
}
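
/*
 * A minimal sketch (compiled out): per the comment above, passing a
 * NULL doi turns dmu_object_info() into a pure existence check.
 * example_object_exists() is a made-up name.
 */
#if 0
static boolean_t
example_object_exists(objset_t *os, uint64_t object)
{
	return (dmu_object_info(os, object, NULL) == 0);
}
#endif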

/*
 * As above, but faster; can be used when you have a held dbuf in hand.
 */
void
dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;

	DB_DNODE_ENTER(db);
	dmu_object_info_from_dnode(DB_DNODE(db), doi);
	DB_DNODE_EXIT(db);
}

/*
 * Faster still when you only care about the size.
 */
void
dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize,
    u_longlong_t *nblk512)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);

	*blksize = dn->dn_datablksz;
	/* add in number of slots used for the dnode itself */
	*nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
	    SPA_MINBLOCKSHIFT) + dn->dn_num_slots;
	DB_DNODE_EXIT(db);
}

void
dmu_object_dnsize_from_db(dmu_buf_t *db_fake, int *dnsize)
{
	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	*dnsize = dn->dn_num_slots << DNODE_SHIFT;
	DB_DNODE_EXIT(db);
}

void
byteswap_uint64_array(void *vbuf, size_t size)
{
	uint64_t *buf = vbuf;
	size_t count = size >> 3;
	int i;

	ASSERT((size & 7) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_64(buf[i]);
}

void
byteswap_uint32_array(void *vbuf, size_t size)
{
	uint32_t *buf = vbuf;
	size_t count = size >> 2;
	int i;

	ASSERT((size & 3) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_32(buf[i]);
}

void
byteswap_uint16_array(void *vbuf, size_t size)
{
	uint16_t *buf = vbuf;
	size_t count = size >> 1;
	int i;

	ASSERT((size & 1) == 0);

	for (i = 0; i < count; i++)
		buf[i] = BSWAP_16(buf[i]);
}

void
byteswap_uint8_array(void *vbuf, size_t size)
{
	(void) vbuf, (void) size;
}
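
/*
 * A hypothetical sketch (compiled out) of how these routines are
 * reached: buffers in foreign byte order are dispatched through the
 * dmu_ot_byteswap[] table declared in dmu.h, indexed by the
 * DMU_BSWAP_* constant recorded for the object type.
 * example_byteswap() is a made-up name.
 */
#if 0
static void
example_byteswap(dmu_object_byteswap_t bswap, void *buf, size_t size)
{
	dmu_ot_byteswap[bswap].ob_func(buf, size);
}
#endif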

void
dmu_init(void)
{
	abd_init();
	zfs_dbgmsg_init();
	sa_cache_init();
	dmu_objset_init();
	dnode_init();
	zfetch_init();
	dmu_tx_init();
	l2arc_init();
	arc_init();
	dbuf_init();
}

void
dmu_fini(void)
{
	arc_fini(); /* arc depends on l2arc, so arc must go first */
	l2arc_fini();
	dmu_tx_fini();
	zfetch_fini();
	dbuf_fini();
	dnode_fini();
	dmu_objset_fini();
	sa_cache_fini();
	zfs_dbgmsg_fini();
	abd_fini();
}

EXPORT_SYMBOL(dmu_bonus_hold);
EXPORT_SYMBOL(dmu_bonus_hold_by_dnode);
EXPORT_SYMBOL(dmu_buf_hold_array_by_bonus);
EXPORT_SYMBOL(dmu_buf_rele_array);
EXPORT_SYMBOL(dmu_prefetch);
EXPORT_SYMBOL(dmu_free_range);
EXPORT_SYMBOL(dmu_free_long_range);
EXPORT_SYMBOL(dmu_free_long_object);
EXPORT_SYMBOL(dmu_read);
EXPORT_SYMBOL(dmu_read_by_dnode);
EXPORT_SYMBOL(dmu_write);
EXPORT_SYMBOL(dmu_write_by_dnode);
EXPORT_SYMBOL(dmu_prealloc);
EXPORT_SYMBOL(dmu_object_info);
EXPORT_SYMBOL(dmu_object_info_from_dnode);
EXPORT_SYMBOL(dmu_object_info_from_db);
EXPORT_SYMBOL(dmu_object_size_from_db);
EXPORT_SYMBOL(dmu_object_dnsize_from_db);
EXPORT_SYMBOL(dmu_object_set_nlevels);
EXPORT_SYMBOL(dmu_object_set_blocksize);
EXPORT_SYMBOL(dmu_object_set_maxblkid);
EXPORT_SYMBOL(dmu_object_set_checksum);
EXPORT_SYMBOL(dmu_object_set_compress);
EXPORT_SYMBOL(dmu_offset_next);
EXPORT_SYMBOL(dmu_write_policy);
EXPORT_SYMBOL(dmu_sync);
EXPORT_SYMBOL(dmu_request_arcbuf);
EXPORT_SYMBOL(dmu_return_arcbuf);
EXPORT_SYMBOL(dmu_assign_arcbuf_by_dnode);
EXPORT_SYMBOL(dmu_assign_arcbuf_by_dbuf);
EXPORT_SYMBOL(dmu_buf_hold);
EXPORT_SYMBOL(dmu_ot);

/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, nopwrite_enabled, INT, ZMOD_RW,
	"Enable NOP writes");

ZFS_MODULE_PARAM(zfs, zfs_, per_txg_dirty_frees_percent, ULONG, ZMOD_RW,
	"Percentage of dirtied blocks from frees in one TXG");

ZFS_MODULE_PARAM(zfs, zfs_, dmu_offset_next_sync, INT, ZMOD_RW,
	"Enable forcing txg sync to find holes");

ZFS_MODULE_PARAM(zfs, , dmu_prefetch_max, INT, ZMOD_RW,
	"Limit one prefetch call to this size");
/* END CSTYLED */