1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23  * Copyright 2011 Nexenta Systems, Inc.  All rights reserved.
24  * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
25  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
26  * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27  * Copyright (c) 2019, Klara Inc.
28  * Copyright (c) 2019, Allan Jude
29  */
30 
31 #include <sys/zfs_context.h>
32 #include <sys/arc.h>
33 #include <sys/dmu.h>
34 #include <sys/dmu_send.h>
35 #include <sys/dmu_impl.h>
36 #include <sys/dbuf.h>
37 #include <sys/dmu_objset.h>
38 #include <sys/dsl_dataset.h>
39 #include <sys/dsl_dir.h>
40 #include <sys/dmu_tx.h>
41 #include <sys/spa.h>
42 #include <sys/zio.h>
43 #include <sys/dmu_zfetch.h>
44 #include <sys/sa.h>
45 #include <sys/sa_impl.h>
46 #include <sys/zfeature.h>
47 #include <sys/blkptr.h>
48 #include <sys/range_tree.h>
49 #include <sys/trace_zfs.h>
50 #include <sys/callb.h>
51 #include <sys/abd.h>
52 #include <sys/vdev.h>
53 #include <cityhash.h>
54 #include <sys/spa_impl.h>
55 #include <sys/wmsum.h>
56 
57 kstat_t *dbuf_ksp;
58 
59 typedef struct dbuf_stats {
60 	/*
61 	 * Various statistics about the size of the dbuf cache.
62 	 */
63 	kstat_named_t cache_count;
64 	kstat_named_t cache_size_bytes;
65 	kstat_named_t cache_size_bytes_max;
66 	/*
67 	 * Statistics regarding the bounds on the dbuf cache size.
68 	 */
69 	kstat_named_t cache_target_bytes;
70 	kstat_named_t cache_lowater_bytes;
71 	kstat_named_t cache_hiwater_bytes;
72 	/*
73 	 * Total number of dbuf cache evictions that have occurred.
74 	 */
75 	kstat_named_t cache_total_evicts;
76 	/*
77 	 * The distribution of dbuf levels in the dbuf cache and
78 	 * the total size of all dbufs at each level.
79 	 */
80 	kstat_named_t cache_levels[DN_MAX_LEVELS];
81 	kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
82 	/*
83 	 * Statistics about the dbuf hash table.
84 	 */
85 	kstat_named_t hash_hits;
86 	kstat_named_t hash_misses;
87 	kstat_named_t hash_collisions;
88 	kstat_named_t hash_elements;
89 	kstat_named_t hash_elements_max;
90 	/*
91 	 * Number of sublists containing more than one dbuf in the dbuf
92 	 * hash table. Keep track of the longest hash chain.
93 	 */
94 	kstat_named_t hash_chains;
95 	kstat_named_t hash_chain_max;
96 	/*
97 	 * Number of times a dbuf_create() discovers that a dbuf was
98 	 * already created and in the dbuf hash table.
99 	 */
100 	kstat_named_t hash_insert_race;
101 	/*
102 	 * Statistics about the size of the metadata dbuf cache.
103 	 */
104 	kstat_named_t metadata_cache_count;
105 	kstat_named_t metadata_cache_size_bytes;
106 	kstat_named_t metadata_cache_size_bytes_max;
107 	/*
108 	 * For diagnostic purposes, this is incremented whenever we can't add
109 	 * something to the metadata cache because it's full, and instead put
110 	 * the data in the regular dbuf cache.
111 	 */
112 	kstat_named_t metadata_cache_overflow;
113 } dbuf_stats_t;
114 
115 dbuf_stats_t dbuf_stats = {
116 	{ "cache_count",			KSTAT_DATA_UINT64 },
117 	{ "cache_size_bytes",			KSTAT_DATA_UINT64 },
118 	{ "cache_size_bytes_max",		KSTAT_DATA_UINT64 },
119 	{ "cache_target_bytes",			KSTAT_DATA_UINT64 },
120 	{ "cache_lowater_bytes",		KSTAT_DATA_UINT64 },
121 	{ "cache_hiwater_bytes",		KSTAT_DATA_UINT64 },
122 	{ "cache_total_evicts",			KSTAT_DATA_UINT64 },
123 	{ { "cache_levels_N",			KSTAT_DATA_UINT64 } },
124 	{ { "cache_levels_bytes_N",		KSTAT_DATA_UINT64 } },
125 	{ "hash_hits",				KSTAT_DATA_UINT64 },
126 	{ "hash_misses",			KSTAT_DATA_UINT64 },
127 	{ "hash_collisions",			KSTAT_DATA_UINT64 },
128 	{ "hash_elements",			KSTAT_DATA_UINT64 },
129 	{ "hash_elements_max",			KSTAT_DATA_UINT64 },
130 	{ "hash_chains",			KSTAT_DATA_UINT64 },
131 	{ "hash_chain_max",			KSTAT_DATA_UINT64 },
132 	{ "hash_insert_race",			KSTAT_DATA_UINT64 },
133 	{ "metadata_cache_count",		KSTAT_DATA_UINT64 },
134 	{ "metadata_cache_size_bytes",		KSTAT_DATA_UINT64 },
135 	{ "metadata_cache_size_bytes_max",	KSTAT_DATA_UINT64 },
136 	{ "metadata_cache_overflow",		KSTAT_DATA_UINT64 }
137 };
138 
139 struct {
140 	wmsum_t cache_count;
141 	wmsum_t cache_total_evicts;
142 	wmsum_t cache_levels[DN_MAX_LEVELS];
143 	wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
144 	wmsum_t hash_hits;
145 	wmsum_t hash_misses;
146 	wmsum_t hash_collisions;
147 	wmsum_t hash_chains;
148 	wmsum_t hash_insert_race;
149 	wmsum_t metadata_cache_count;
150 	wmsum_t metadata_cache_overflow;
151 } dbuf_sums;
152 
153 #define	DBUF_STAT_INCR(stat, val)	\
154 	wmsum_add(&dbuf_sums.stat, val);
155 #define	DBUF_STAT_DECR(stat, val)	\
156 	DBUF_STAT_INCR(stat, -(val));
157 #define	DBUF_STAT_BUMP(stat)		\
158 	DBUF_STAT_INCR(stat, 1);
159 #define	DBUF_STAT_BUMPDOWN(stat)	\
160 	DBUF_STAT_INCR(stat, -1);
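/*
 * Lock-free maximum tracking for a kstat value: re-read the currently
 * recorded maximum and retry the compare-and-swap until either the stored
 * value is already >= v or our update lands.
 */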
161 #define	DBUF_STAT_MAX(stat, v) {					\
162 	uint64_t _m;							\
163 	while ((v) > (_m = dbuf_stats.stat.value.ui64) &&		\
164 	    (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
165 		continue;						\
166 }
167 
168 static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
169 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
170 static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);
171 static int dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags);
172 
173 /*
174  * Global data structures and functions for the dbuf cache.
175  */
176 static kmem_cache_t *dbuf_kmem_cache;
177 static taskq_t *dbu_evict_taskq;
178 
179 static kthread_t *dbuf_cache_evict_thread;
180 static kmutex_t dbuf_evict_lock;
181 static kcondvar_t dbuf_evict_cv;
182 static boolean_t dbuf_evict_thread_exit;
183 
184 /*
185  * There are two dbuf caches; each dbuf can only be in one of them at a time.
186  *
187  * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
188  *    from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
189  *    that represent the metadata that describes filesystems/snapshots/
190  *    bookmarks/properties/etc. We only evict from this cache when we export a
191  *    pool, to short-circuit as much I/O as possible for all administrative
192  *    commands that need the metadata. There is no eviction policy for this
193  *    cache, because we try to only include types in it which would occupy a
194  *    very small amount of space per object but create a large impact on the
195  *    performance of these commands. Instead, after it reaches a maximum size
196  *    (which should only happen on very small memory systems with a very large
197  *    number of filesystem objects), we stop taking new dbufs into the
198  *    metadata cache, instead putting them in the normal dbuf cache.
199  *
200  * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
201  *    are not currently held but have been recently released. These dbufs
202  *    are not eligible for arc eviction until they are aged out of the cache.
203  *    Dbufs that are aged out of the cache will be immediately destroyed and
204  *    become eligible for arc eviction.
205  *
206  * Dbufs are added to these caches once the last hold is released. If a dbuf is
207  * later accessed and still exists in the dbuf cache, then it will be removed
208  * from the cache and later re-added to the head of the cache.
209  *
210  * If a given dbuf meets the requirements for the metadata cache, it will go
211  * there, otherwise it will be considered for the generic LRU dbuf cache. The
212  * caches and the refcounts tracking their sizes are stored in an array indexed
213  * by those caches' matching enum values (from dbuf_cached_state_t).
214  */
215 typedef struct dbuf_cache {
216 	multilist_t cache;
217 	zfs_refcount_t size ____cacheline_aligned;
218 } dbuf_cache_t;
219 dbuf_cache_t dbuf_caches[DB_CACHE_MAX];
220 
221 /* Size limits for the caches */
222 unsigned long dbuf_cache_max_bytes = ULONG_MAX;
223 unsigned long dbuf_metadata_cache_max_bytes = ULONG_MAX;
224 
225 /* Set the default sizes of the caches to log2 fraction of arc size */
226 int dbuf_cache_shift = 5;
227 int dbuf_metadata_cache_shift = 6;
228 
229 static unsigned long dbuf_cache_target_bytes(void);
230 static unsigned long dbuf_metadata_cache_target_bytes(void);
231 
232 /*
233  * The LRU dbuf cache uses a three-stage eviction policy:
234  *	- A low water marker designates when the dbuf eviction thread
235  *	should stop evicting from the dbuf cache.
236  *	- When we reach the maximum size (aka mid water mark), we
237  *	signal the eviction thread to run.
238  *	- The high water mark indicates when the eviction thread
239  *	is unable to keep up with the incoming load and eviction must
240  *	happen in the context of the calling thread.
241  *
242  * The dbuf cache:
243  *                                                 (max size)
244  *                                      low water   mid water   hi water
245  * +----------------------------------------+----------+----------+
246  * |                                        |          |          |
247  * |                                        |          |          |
248  * |                                        |          |          |
249  * |                                        |          |          |
250  * +----------------------------------------+----------+----------+
251  *                                        stop        signal     evict
252  *                                      evicting     eviction   directly
253  *                                                    thread
254  *
255  * The high and low water marks indicate the operating range for the eviction
256  * thread. The low water mark is, by default, 90% of the total size of the
257  * cache and the high water mark is at 110% (both of these percentages can be
258  * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
259  * respectively). The eviction thread will try to ensure that the cache remains
260  * within this range by waking up every second and checking if the cache is
261  * above the low water mark. The thread can also be woken up by callers adding
262  * elements into the cache if the cache is larger than the mid water (i.e. max
263  * cache size). Once the eviction thread is woken up and eviction is required,
264  * it will continue evicting buffers until it's able to reduce the cache size
265  * to the low water mark. If the cache size continues to grow and hits the high
266  * water mark, then callers adding elements to the cache will begin to evict
267  * directly from the cache until the cache is no longer above the high water
268  * mark.
269  */
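/*
 * A worked example with the default percentages below: if
 * dbuf_cache_target_bytes() is 100MB, the low water mark is
 * 100MB - 10% = 90MB and the high water mark is 100MB + 10% = 110MB.
 * The eviction thread is signalled once the cache grows past 100MB,
 * evicts down to 90MB, and callers evict directly while the cache is
 * above 110MB.
 */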
270 
271 /*
272  * The percentage above and below the maximum cache size.
273  */
274 uint_t dbuf_cache_hiwater_pct = 10;
275 uint_t dbuf_cache_lowater_pct = 10;
276 
277 static int
278 dbuf_cons(void *vdb, void *unused, int kmflag)
279 {
280 	(void) unused, (void) kmflag;
281 	dmu_buf_impl_t *db = vdb;
282 	bzero(db, sizeof (dmu_buf_impl_t));
283 
284 	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
285 	rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
286 	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
287 	multilist_link_init(&db->db_cache_link);
288 	zfs_refcount_create(&db->db_holds);
289 
290 	return (0);
291 }
292 
293 static void
294 dbuf_dest(void *vdb, void *unused)
295 {
296 	(void) unused;
297 	dmu_buf_impl_t *db = vdb;
298 	mutex_destroy(&db->db_mtx);
299 	rw_destroy(&db->db_rwlock);
300 	cv_destroy(&db->db_changed);
301 	ASSERT(!multilist_link_active(&db->db_cache_link));
302 	zfs_refcount_destroy(&db->db_holds);
303 }
304 
305 /*
306  * dbuf hash table routines
307  */
308 static dbuf_hash_table_t dbuf_hash_table;
309 
310 /*
311  * We use Cityhash for this. It's fast, and has good hash properties without
312  * requiring any large static buffers.
313  */
314 static uint64_t
315 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
316 {
317 	return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
318 }
319 
320 #define	DTRACE_SET_STATE(db, why) \
321 	DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db,	\
322 	    const char *, why)
323 
324 #define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
325 	((dbuf)->db.db_object == (obj) &&		\
326 	(dbuf)->db_objset == (os) &&			\
327 	(dbuf)->db_level == (level) &&			\
328 	(dbuf)->db_blkid == (blkid))
329 
330 dmu_buf_impl_t *
331 dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
332 {
333 	dbuf_hash_table_t *h = &dbuf_hash_table;
334 	uint64_t hv;
335 	uint64_t idx;
336 	dmu_buf_impl_t *db;
337 
338 	hv = dbuf_hash(os, obj, level, blkid);
339 	idx = hv & h->hash_table_mask;
340 
341 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
342 	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
343 		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
344 			mutex_enter(&db->db_mtx);
345 			if (db->db_state != DB_EVICTING) {
346 				mutex_exit(DBUF_HASH_MUTEX(h, idx));
347 				return (db);
348 			}
349 			mutex_exit(&db->db_mtx);
350 		}
351 	}
352 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
353 	return (NULL);
354 }
355 
356 static dmu_buf_impl_t *
357 dbuf_find_bonus(objset_t *os, uint64_t object)
358 {
359 	dnode_t *dn;
360 	dmu_buf_impl_t *db = NULL;
361 
362 	if (dnode_hold(os, object, FTAG, &dn) == 0) {
363 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
364 		if (dn->dn_bonus != NULL) {
365 			db = dn->dn_bonus;
366 			mutex_enter(&db->db_mtx);
367 		}
368 		rw_exit(&dn->dn_struct_rwlock);
369 		dnode_rele(dn, FTAG);
370 	}
371 	return (db);
372 }
373 
374 /*
375  * Insert an entry into the hash table.  If there is already an element
376  * equal to elem in the hash table, then the already existing element
377  * will be returned and the new element will not be inserted.
378  * Otherwise returns NULL.
379  */
380 static dmu_buf_impl_t *
381 dbuf_hash_insert(dmu_buf_impl_t *db)
382 {
383 	dbuf_hash_table_t *h = &dbuf_hash_table;
384 	objset_t *os = db->db_objset;
385 	uint64_t obj = db->db.db_object;
386 	int level = db->db_level;
387 	uint64_t blkid, hv, idx;
388 	dmu_buf_impl_t *dbf;
389 	uint32_t i;
390 
391 	blkid = db->db_blkid;
392 	hv = dbuf_hash(os, obj, level, blkid);
393 	idx = hv & h->hash_table_mask;
394 
395 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
396 	for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
397 	    dbf = dbf->db_hash_next, i++) {
398 		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
399 			mutex_enter(&dbf->db_mtx);
400 			if (dbf->db_state != DB_EVICTING) {
401 				mutex_exit(DBUF_HASH_MUTEX(h, idx));
402 				return (dbf);
403 			}
404 			mutex_exit(&dbf->db_mtx);
405 		}
406 	}
407 
408 	if (i > 0) {
409 		DBUF_STAT_BUMP(hash_collisions);
410 		if (i == 1)
411 			DBUF_STAT_BUMP(hash_chains);
412 
413 		DBUF_STAT_MAX(hash_chain_max, i);
414 	}
415 
416 	mutex_enter(&db->db_mtx);
417 	db->db_hash_next = h->hash_table[idx];
418 	h->hash_table[idx] = db;
419 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
420 	uint64_t he = atomic_inc_64_nv(&dbuf_stats.hash_elements.value.ui64);
421 	DBUF_STAT_MAX(hash_elements_max, he);
422 
423 	return (NULL);
424 }
425 
426 /*
427  * This returns whether this dbuf should be stored in the metadata cache, which
428  * is based on whether it's from one of the dnode types that store data related
429  * to traversing dataset hierarchies.
430  */
431 static boolean_t
432 dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
433 {
434 	DB_DNODE_ENTER(db);
435 	dmu_object_type_t type = DB_DNODE(db)->dn_type;
436 	DB_DNODE_EXIT(db);
437 
438 	/* Check if this dbuf is one of the types we care about */
439 	if (DMU_OT_IS_METADATA_CACHED(type)) {
440 		/* If we hit this, then we set something up wrong in dmu_ot */
441 		ASSERT(DMU_OT_IS_METADATA(type));
442 
443 		/*
444 		 * Sanity check for small-memory systems: don't allocate too
445 		 * much memory for this purpose.
446 		 */
447 		if (zfs_refcount_count(
448 		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
449 		    dbuf_metadata_cache_target_bytes()) {
450 			DBUF_STAT_BUMP(metadata_cache_overflow);
451 			return (B_FALSE);
452 		}
453 
454 		return (B_TRUE);
455 	}
456 
457 	return (B_FALSE);
458 }
459 
460 /*
461  * Remove an entry from the hash table.  It must be in the EVICTING state.
462  */
463 static void
464 dbuf_hash_remove(dmu_buf_impl_t *db)
465 {
466 	dbuf_hash_table_t *h = &dbuf_hash_table;
467 	uint64_t hv, idx;
468 	dmu_buf_impl_t *dbf, **dbp;
469 
470 	hv = dbuf_hash(db->db_objset, db->db.db_object,
471 	    db->db_level, db->db_blkid);
472 	idx = hv & h->hash_table_mask;
473 
474 	/*
475 	 * We mustn't hold db_mtx to maintain lock ordering:
476 	 * DBUF_HASH_MUTEX > db_mtx.
477 	 */
478 	ASSERT(zfs_refcount_is_zero(&db->db_holds));
479 	ASSERT(db->db_state == DB_EVICTING);
480 	ASSERT(!MUTEX_HELD(&db->db_mtx));
481 
482 	mutex_enter(DBUF_HASH_MUTEX(h, idx));
483 	dbp = &h->hash_table[idx];
484 	while ((dbf = *dbp) != db) {
485 		dbp = &dbf->db_hash_next;
486 		ASSERT(dbf != NULL);
487 	}
488 	*dbp = db->db_hash_next;
489 	db->db_hash_next = NULL;
490 	if (h->hash_table[idx] &&
491 	    h->hash_table[idx]->db_hash_next == NULL)
492 		DBUF_STAT_BUMPDOWN(hash_chains);
493 	mutex_exit(DBUF_HASH_MUTEX(h, idx));
494 	atomic_dec_64(&dbuf_stats.hash_elements.value.ui64);
495 }
496 
497 typedef enum {
498 	DBVU_EVICTING,
499 	DBVU_NOT_EVICTING
500 } dbvu_verify_type_t;
501 
502 static void
503 dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
504 {
505 #ifdef ZFS_DEBUG
506 	int64_t holds;
507 
508 	if (db->db_user == NULL)
509 		return;
510 
511 	/* Only data blocks support the attachment of user data. */
512 	ASSERT(db->db_level == 0);
513 
514 	/* Clients must resolve a dbuf before attaching user data. */
515 	ASSERT(db->db.db_data != NULL);
516 	ASSERT3U(db->db_state, ==, DB_CACHED);
517 
518 	holds = zfs_refcount_count(&db->db_holds);
519 	if (verify_type == DBVU_EVICTING) {
520 		/*
521 		 * Immediate eviction occurs when holds == dirtycnt.
522 		 * For normal eviction buffers, holds is zero on
523 		 * eviction, except when dbuf_fix_old_data() calls
524 		 * dbuf_clear_data().  However, the hold count can grow
525 		 * during eviction even though db_mtx is held (see
526 		 * dmu_bonus_hold() for an example), so we can only
527 		 * test the generic invariant that holds >= dirtycnt.
528 		 */
529 		ASSERT3U(holds, >=, db->db_dirtycnt);
530 	} else {
531 		if (db->db_user_immediate_evict == TRUE)
532 			ASSERT3U(holds, >=, db->db_dirtycnt);
533 		else
534 			ASSERT3U(holds, >, 0);
535 	}
536 #endif
537 }
538 
539 static void
540 dbuf_evict_user(dmu_buf_impl_t *db)
541 {
542 	dmu_buf_user_t *dbu = db->db_user;
543 
544 	ASSERT(MUTEX_HELD(&db->db_mtx));
545 
546 	if (dbu == NULL)
547 		return;
548 
549 	dbuf_verify_user(db, DBVU_EVICTING);
550 	db->db_user = NULL;
551 
552 #ifdef ZFS_DEBUG
553 	if (dbu->dbu_clear_on_evict_dbufp != NULL)
554 		*dbu->dbu_clear_on_evict_dbufp = NULL;
555 #endif
556 
557 	/*
558 	 * There are two eviction callbacks - one that we call synchronously
559 	 * and one that we invoke via a taskq.  The async one is useful for
560 	 * avoiding lock order reversals and limiting stack depth.
561 	 *
562 	 * Note that if we have a sync callback but no async callback,
563 	 * it's likely that the sync callback will free the structure
564 	 * containing the dbu.  In that case we need to take care to not
565 	 * dereference dbu after calling the sync evict func.
566 	 */
567 	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);
568 
569 	if (dbu->dbu_evict_func_sync != NULL)
570 		dbu->dbu_evict_func_sync(dbu);
571 
572 	if (has_async) {
573 		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
574 		    dbu, 0, &dbu->dbu_tqent);
575 	}
576 }
577 
578 boolean_t
579 dbuf_is_metadata(dmu_buf_impl_t *db)
580 {
581 	/*
582 	 * Consider indirect blocks and spill blocks to be meta data.
583 	 */
584 	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
585 		return (B_TRUE);
586 	} else {
587 		boolean_t is_metadata;
588 
589 		DB_DNODE_ENTER(db);
590 		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
591 		DB_DNODE_EXIT(db);
592 
593 		return (is_metadata);
594 	}
595 }
596 
597 
598 /*
599  * This function *must* return indices evenly distributed between all
600  * sublists of the multilist. This is needed due to how the dbuf eviction
601  * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
602  * distributed between all sublists and uses this assumption when
603  * deciding which sublist to evict from and how much to evict from it.
604  */
605 static unsigned int
606 dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
607 {
608 	dmu_buf_impl_t *db = obj;
609 
610 	/*
611 	 * The assumption here is that the hash value for a given
612 	 * dmu_buf_impl_t will remain constant throughout its lifetime
613 	 * (i.e. its objset, object, level and blkid fields don't change).
614 	 * Thus, we don't need to store the dbuf's sublist index
615 	 * on insertion, as this index can be recalculated on removal.
616 	 *
617 	 * Also, the low order bits of the hash value are thought to be
618 	 * distributed evenly. Otherwise, in the case that the multilist
619 	 * has a power of two number of sublists, each sublist's usage
620 	 * would not be evenly distributed. In this context full 64bit
621 	 * division would be a waste of time, so limit it to 32 bits.
622 	 */
623 	return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
624 	    db->db_level, db->db_blkid) %
625 	    multilist_get_num_sublists(ml));
626 }
627 
628 /*
629  * The target size of the dbuf cache can grow with the ARC target,
630  * unless limited by the tunable dbuf_cache_max_bytes.
631  */
632 static inline unsigned long
633 dbuf_cache_target_bytes(void)
634 {
635 	return (MIN(dbuf_cache_max_bytes,
636 	    arc_target_bytes() >> dbuf_cache_shift));
637 }
638 
639 /*
640  * The target size of the dbuf metadata cache can grow with the ARC target,
641  * unless limited by the tunable dbuf_metadata_cache_max_bytes.
642  */
643 static inline unsigned long
644 dbuf_metadata_cache_target_bytes(void)
645 {
646 	return (MIN(dbuf_metadata_cache_max_bytes,
647 	    arc_target_bytes() >> dbuf_metadata_cache_shift));
648 }
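/*
 * For example (actual values track the ARC target at call time): with a
 * 4GB ARC target and the default shifts above, the dbuf cache target is
 * 4GB >> 5 = 128MB and the metadata cache target is 4GB >> 6 = 64MB,
 * unless capped by dbuf_cache_max_bytes / dbuf_metadata_cache_max_bytes.
 */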
649 
650 static inline uint64_t
651 dbuf_cache_hiwater_bytes(void)
652 {
653 	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
654 	return (dbuf_cache_target +
655 	    (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
656 }
657 
658 static inline uint64_t
659 dbuf_cache_lowater_bytes(void)
660 {
661 	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
662 	return (dbuf_cache_target -
663 	    (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
664 }
665 
666 static inline boolean_t
667 dbuf_cache_above_lowater(void)
668 {
669 	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
670 	    dbuf_cache_lowater_bytes());
671 }
672 
673 /*
674  * Evict the oldest eligible dbuf from the dbuf cache.
675  */
676 static void
677 dbuf_evict_one(void)
678 {
679 	int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
680 	multilist_sublist_t *mls = multilist_sublist_lock(
681 	    &dbuf_caches[DB_DBUF_CACHE].cache, idx);
682 
683 	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));
684 
685 	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
686 	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
687 		db = multilist_sublist_prev(mls, db);
688 	}
689 
690 	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
691 	    multilist_sublist_t *, mls);
692 
693 	if (db != NULL) {
694 		multilist_sublist_remove(mls, db);
695 		multilist_sublist_unlock(mls);
696 		(void) zfs_refcount_remove_many(
697 		    &dbuf_caches[DB_DBUF_CACHE].size, db->db.db_size, db);
698 		DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
699 		DBUF_STAT_BUMPDOWN(cache_count);
700 		DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
701 		    db->db.db_size);
702 		ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
703 		db->db_caching_status = DB_NO_CACHE;
704 		dbuf_destroy(db);
705 		DBUF_STAT_BUMP(cache_total_evicts);
706 	} else {
707 		multilist_sublist_unlock(mls);
708 	}
709 }
710 
711 /*
712  * The dbuf evict thread is responsible for aging out dbufs from the
713  * cache. Once the cache has reached its maximum size, dbufs are removed
714  * and destroyed. The eviction thread will continue running until the size
715  * of the dbuf cache is at or below the low water mark. Once the dbuf is aged
716  * out of the cache it is destroyed and becomes eligible for arc eviction.
717  */
718 static void
719 dbuf_evict_thread(void *unused)
720 {
721 	(void) unused;
722 	callb_cpr_t cpr;
723 
724 	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);
725 
726 	mutex_enter(&dbuf_evict_lock);
727 	while (!dbuf_evict_thread_exit) {
728 		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
729 			CALLB_CPR_SAFE_BEGIN(&cpr);
730 			(void) cv_timedwait_idle_hires(&dbuf_evict_cv,
731 			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
732 			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
733 		}
734 		mutex_exit(&dbuf_evict_lock);
735 
736 		/*
737 		 * Keep evicting as long as we're above the low water mark
738 		 * for the cache. We do this without holding the locks to
739 		 * minimize lock contention.
740 		 */
741 		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
742 			dbuf_evict_one();
743 		}
744 
745 		mutex_enter(&dbuf_evict_lock);
746 	}
747 
748 	dbuf_evict_thread_exit = B_FALSE;
749 	cv_broadcast(&dbuf_evict_cv);
750 	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
751 	thread_exit();
752 }
753 
754 /*
755  * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
756  * If the dbuf cache is at its high water mark, then evict a dbuf from the
757  * dbuf cache using the callers context.
758  */
759 static void
760 dbuf_evict_notify(uint64_t size)
761 {
762 	/*
763 	 * We check if we should evict without holding the dbuf_evict_lock,
764 	 * because it's OK to occasionally make the wrong decision here,
765 	 * and grabbing the lock results in massive lock contention.
766 	 */
767 	if (size > dbuf_cache_target_bytes()) {
768 		if (size > dbuf_cache_hiwater_bytes())
769 			dbuf_evict_one();
770 		cv_signal(&dbuf_evict_cv);
771 	}
772 }
773 
774 static int
775 dbuf_kstat_update(kstat_t *ksp, int rw)
776 {
777 	dbuf_stats_t *ds = ksp->ks_data;
778 
779 	if (rw == KSTAT_WRITE)
780 		return (SET_ERROR(EACCES));
781 
782 	ds->cache_count.value.ui64 =
783 	    wmsum_value(&dbuf_sums.cache_count);
784 	ds->cache_size_bytes.value.ui64 =
785 	    zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
786 	ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
787 	ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
788 	ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
789 	ds->cache_total_evicts.value.ui64 =
790 	    wmsum_value(&dbuf_sums.cache_total_evicts);
791 	for (int i = 0; i < DN_MAX_LEVELS; i++) {
792 		ds->cache_levels[i].value.ui64 =
793 		    wmsum_value(&dbuf_sums.cache_levels[i]);
794 		ds->cache_levels_bytes[i].value.ui64 =
795 		    wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
796 	}
797 	ds->hash_hits.value.ui64 =
798 	    wmsum_value(&dbuf_sums.hash_hits);
799 	ds->hash_misses.value.ui64 =
800 	    wmsum_value(&dbuf_sums.hash_misses);
801 	ds->hash_collisions.value.ui64 =
802 	    wmsum_value(&dbuf_sums.hash_collisions);
803 	ds->hash_chains.value.ui64 =
804 	    wmsum_value(&dbuf_sums.hash_chains);
805 	ds->hash_insert_race.value.ui64 =
806 	    wmsum_value(&dbuf_sums.hash_insert_race);
807 	ds->metadata_cache_count.value.ui64 =
808 	    wmsum_value(&dbuf_sums.metadata_cache_count);
809 	ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
810 	    &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
811 	ds->metadata_cache_overflow.value.ui64 =
812 	    wmsum_value(&dbuf_sums.metadata_cache_overflow);
813 	return (0);
814 }
815 
816 void
817 dbuf_init(void)
818 {
819 	uint64_t hsize = 1ULL << 16;
820 	dbuf_hash_table_t *h = &dbuf_hash_table;
821 	int i;
822 
823 	/*
824 	 * The hash table is big enough to fill one eighth of physical memory
825 	 * with an average block size of zfs_arc_average_blocksize (default 8K).
826 	 * By default, the table will take up
827 	 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
828 	 */
829 	while (hsize * zfs_arc_average_blocksize < arc_all_memory() / 8)
830 		hsize <<= 1;
831 
832 retry:
833 	h->hash_table_mask = hsize - 1;
834 #if defined(_KERNEL)
835 	/*
836 	 * Large allocations which do not require contiguous pages
837 	 * should be using vmem_alloc() in the linux kernel
838 	 */
839 	h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
840 #else
841 	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
842 #endif
843 	if (h->hash_table == NULL) {
844 		/* XXX - we should really return an error instead of assert */
845 		ASSERT(hsize > (1ULL << 10));
846 		hsize >>= 1;
847 		goto retry;
848 	}
849 
850 	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
851 	    sizeof (dmu_buf_impl_t),
852 	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
853 
854 	for (i = 0; i < DBUF_MUTEXES; i++)
855 		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
856 
857 	dbuf_stats_init(h);
858 
859 	/*
860 	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
861 	 * configuration is not required.
862 	 */
863 	dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);
864 
865 	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
866 		multilist_create(&dbuf_caches[dcs].cache,
867 		    sizeof (dmu_buf_impl_t),
868 		    offsetof(dmu_buf_impl_t, db_cache_link),
869 		    dbuf_cache_multilist_index_func);
870 		zfs_refcount_create(&dbuf_caches[dcs].size);
871 	}
872 
873 	dbuf_evict_thread_exit = B_FALSE;
874 	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
875 	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
876 	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
877 	    NULL, 0, &p0, TS_RUN, minclsyspri);
878 
879 	wmsum_init(&dbuf_sums.cache_count, 0);
880 	wmsum_init(&dbuf_sums.cache_total_evicts, 0);
881 	for (i = 0; i < DN_MAX_LEVELS; i++) {
882 		wmsum_init(&dbuf_sums.cache_levels[i], 0);
883 		wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
884 	}
885 	wmsum_init(&dbuf_sums.hash_hits, 0);
886 	wmsum_init(&dbuf_sums.hash_misses, 0);
887 	wmsum_init(&dbuf_sums.hash_collisions, 0);
888 	wmsum_init(&dbuf_sums.hash_chains, 0);
889 	wmsum_init(&dbuf_sums.hash_insert_race, 0);
890 	wmsum_init(&dbuf_sums.metadata_cache_count, 0);
891 	wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);
892 
893 	dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
894 	    KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
895 	    KSTAT_FLAG_VIRTUAL);
896 	if (dbuf_ksp != NULL) {
897 		for (i = 0; i < DN_MAX_LEVELS; i++) {
898 			snprintf(dbuf_stats.cache_levels[i].name,
899 			    KSTAT_STRLEN, "cache_level_%d", i);
900 			dbuf_stats.cache_levels[i].data_type =
901 			    KSTAT_DATA_UINT64;
902 			snprintf(dbuf_stats.cache_levels_bytes[i].name,
903 			    KSTAT_STRLEN, "cache_level_%d_bytes", i);
904 			dbuf_stats.cache_levels_bytes[i].data_type =
905 			    KSTAT_DATA_UINT64;
906 		}
907 		dbuf_ksp->ks_data = &dbuf_stats;
908 		dbuf_ksp->ks_update = dbuf_kstat_update;
909 		kstat_install(dbuf_ksp);
910 	}
911 }
912 
913 void
914 dbuf_fini(void)
915 {
916 	dbuf_hash_table_t *h = &dbuf_hash_table;
917 	int i;
918 
919 	dbuf_stats_destroy();
920 
921 	for (i = 0; i < DBUF_MUTEXES; i++)
922 		mutex_destroy(&h->hash_mutexes[i]);
923 #if defined(_KERNEL)
924 	/*
925 	 * Large allocations which do not require contiguous pages
926 	 * should be using vmem_free() in the linux kernel
927 	 */
928 	vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
929 #else
930 	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
931 #endif
932 	kmem_cache_destroy(dbuf_kmem_cache);
933 	taskq_destroy(dbu_evict_taskq);
934 
935 	mutex_enter(&dbuf_evict_lock);
936 	dbuf_evict_thread_exit = B_TRUE;
937 	while (dbuf_evict_thread_exit) {
938 		cv_signal(&dbuf_evict_cv);
939 		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
940 	}
941 	mutex_exit(&dbuf_evict_lock);
942 
943 	mutex_destroy(&dbuf_evict_lock);
944 	cv_destroy(&dbuf_evict_cv);
945 
946 	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
947 		zfs_refcount_destroy(&dbuf_caches[dcs].size);
948 		multilist_destroy(&dbuf_caches[dcs].cache);
949 	}
950 
951 	if (dbuf_ksp != NULL) {
952 		kstat_delete(dbuf_ksp);
953 		dbuf_ksp = NULL;
954 	}
955 
956 	wmsum_fini(&dbuf_sums.cache_count);
957 	wmsum_fini(&dbuf_sums.cache_total_evicts);
958 	for (i = 0; i < DN_MAX_LEVELS; i++) {
959 		wmsum_fini(&dbuf_sums.cache_levels[i]);
960 		wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
961 	}
962 	wmsum_fini(&dbuf_sums.hash_hits);
963 	wmsum_fini(&dbuf_sums.hash_misses);
964 	wmsum_fini(&dbuf_sums.hash_collisions);
965 	wmsum_fini(&dbuf_sums.hash_chains);
966 	wmsum_fini(&dbuf_sums.hash_insert_race);
967 	wmsum_fini(&dbuf_sums.metadata_cache_count);
968 	wmsum_fini(&dbuf_sums.metadata_cache_overflow);
969 }
970 
971 /*
972  * Other stuff.
973  */
974 
975 #ifdef ZFS_DEBUG
976 static void
977 dbuf_verify(dmu_buf_impl_t *db)
978 {
979 	dnode_t *dn;
980 	dbuf_dirty_record_t *dr;
981 	uint32_t txg_prev;
982 
983 	ASSERT(MUTEX_HELD(&db->db_mtx));
984 
985 	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
986 		return;
987 
988 	ASSERT(db->db_objset != NULL);
989 	DB_DNODE_ENTER(db);
990 	dn = DB_DNODE(db);
991 	if (dn == NULL) {
992 		ASSERT(db->db_parent == NULL);
993 		ASSERT(db->db_blkptr == NULL);
994 	} else {
995 		ASSERT3U(db->db.db_object, ==, dn->dn_object);
996 		ASSERT3P(db->db_objset, ==, dn->dn_objset);
997 		ASSERT3U(db->db_level, <, dn->dn_nlevels);
998 		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
999 		    db->db_blkid == DMU_SPILL_BLKID ||
1000 		    !avl_is_empty(&dn->dn_dbufs));
1001 	}
1002 	if (db->db_blkid == DMU_BONUS_BLKID) {
1003 		ASSERT(dn != NULL);
1004 		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
1005 		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
1006 	} else if (db->db_blkid == DMU_SPILL_BLKID) {
1007 		ASSERT(dn != NULL);
1008 		ASSERT0(db->db.db_offset);
1009 	} else {
1010 		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
1011 	}
1012 
1013 	if ((dr = list_head(&db->db_dirty_records)) != NULL) {
1014 		ASSERT(dr->dr_dbuf == db);
1015 		txg_prev = dr->dr_txg;
1016 		for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
1017 		    dr = list_next(&db->db_dirty_records, dr)) {
1018 			ASSERT(dr->dr_dbuf == db);
1019 			ASSERT(txg_prev > dr->dr_txg);
1020 			txg_prev = dr->dr_txg;
1021 		}
1022 	}
1023 
1024 	/*
1025 	 * We can't assert that db_size matches dn_datablksz because it
1026 	 * can be momentarily different when another thread is doing
1027 	 * dnode_set_blksz().
1028 	 */
1029 	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
1030 		dr = db->db_data_pending;
1031 		/*
1032 		 * It should only be modified in syncing context, so
1033 		 * make sure we only have one copy of the data.
1034 		 */
1035 		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
1036 	}
1037 
1038 	/* verify db->db_blkptr */
1039 	if (db->db_blkptr) {
1040 		if (db->db_parent == dn->dn_dbuf) {
1041 			/* db is pointed to by the dnode */
1042 			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
1043 			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
1044 				ASSERT(db->db_parent == NULL);
1045 			else
1046 				ASSERT(db->db_parent != NULL);
1047 			if (db->db_blkid != DMU_SPILL_BLKID)
1048 				ASSERT3P(db->db_blkptr, ==,
1049 				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
1050 		} else {
1051 			/* db is pointed to by an indirect block */
1052 			int epb __maybe_unused = db->db_parent->db.db_size >>
1053 			    SPA_BLKPTRSHIFT;
1054 			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
1055 			ASSERT3U(db->db_parent->db.db_object, ==,
1056 			    db->db.db_object);
1057 			/*
1058 			 * dnode_grow_indblksz() can make this fail if we don't
1059 			 * have the parent's rwlock.  XXX indblksz no longer
1060 			 * grows.  safe to do this now?
1061 			 */
1062 			if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
1063 				ASSERT3P(db->db_blkptr, ==,
1064 				    ((blkptr_t *)db->db_parent->db.db_data +
1065 				    db->db_blkid % epb));
1066 			}
1067 		}
1068 	}
1069 	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
1070 	    (db->db_buf == NULL || db->db_buf->b_data) &&
1071 	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
1072 	    db->db_state != DB_FILL && !dn->dn_free_txg) {
1073 		/*
1074 		 * If the blkptr isn't set but they have nonzero data,
1075 		 * it had better be dirty, otherwise we'll lose that
1076 		 * data when we evict this buffer.
1077 		 *
1078 		 * There is an exception to this rule for indirect blocks; in
1079 		 * this case, if the indirect block is a hole, we fill in a few
1080 		 * fields on each of the child blocks (importantly, birth time)
1081 		 * to prevent hole birth times from being lost when you
1082 		 * partially fill in a hole.
1083 		 */
1084 		if (db->db_dirtycnt == 0) {
1085 			if (db->db_level == 0) {
1086 				uint64_t *buf = db->db.db_data;
1087 				int i;
1088 
1089 				for (i = 0; i < db->db.db_size >> 3; i++) {
1090 					ASSERT(buf[i] == 0);
1091 				}
1092 			} else {
1093 				blkptr_t *bps = db->db.db_data;
1094 				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
1095 				    db->db.db_size);
1096 				/*
1097 				 * We want to verify that all the blkptrs in the
1098 				 * indirect block are holes, but we may have
1099 				 * automatically set up a few fields for them.
1100 				 * We iterate through each blkptr and verify
1101 				 * they only have those fields set.
1102 				 */
1103 				for (int i = 0;
1104 				    i < db->db.db_size / sizeof (blkptr_t);
1105 				    i++) {
1106 					blkptr_t *bp = &bps[i];
1107 					ASSERT(ZIO_CHECKSUM_IS_ZERO(
1108 					    &bp->blk_cksum));
1109 					ASSERT(
1110 					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
1111 					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
1112 					    DVA_IS_EMPTY(&bp->blk_dva[2]));
1113 					ASSERT0(bp->blk_fill);
1114 					ASSERT0(bp->blk_pad[0]);
1115 					ASSERT0(bp->blk_pad[1]);
1116 					ASSERT(!BP_IS_EMBEDDED(bp));
1117 					ASSERT(BP_IS_HOLE(bp));
1118 					ASSERT0(bp->blk_phys_birth);
1119 				}
1120 			}
1121 		}
1122 	}
1123 	DB_DNODE_EXIT(db);
1124 }
1125 #endif
1126 
1127 static void
1128 dbuf_clear_data(dmu_buf_impl_t *db)
1129 {
1130 	ASSERT(MUTEX_HELD(&db->db_mtx));
1131 	dbuf_evict_user(db);
1132 	ASSERT3P(db->db_buf, ==, NULL);
1133 	db->db.db_data = NULL;
1134 	if (db->db_state != DB_NOFILL) {
1135 		db->db_state = DB_UNCACHED;
1136 		DTRACE_SET_STATE(db, "clear data");
1137 	}
1138 }
1139 
1140 static void
1141 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
1142 {
1143 	ASSERT(MUTEX_HELD(&db->db_mtx));
1144 	ASSERT(buf != NULL);
1145 
1146 	db->db_buf = buf;
1147 	ASSERT(buf->b_data != NULL);
1148 	db->db.db_data = buf->b_data;
1149 }
1150 
1151 static arc_buf_t *
1152 dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
1153 {
1154 	spa_t *spa = db->db_objset->os_spa;
1155 
1156 	return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
1157 }
1158 
1159 /*
1160  * Loan out an arc_buf for read.  Return the loaned arc_buf.
1161  */
1162 arc_buf_t *
1163 dbuf_loan_arcbuf(dmu_buf_impl_t *db)
1164 {
1165 	arc_buf_t *abuf;
1166 
1167 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1168 	mutex_enter(&db->db_mtx);
1169 	if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
1170 		int blksz = db->db.db_size;
1171 		spa_t *spa = db->db_objset->os_spa;
1172 
1173 		mutex_exit(&db->db_mtx);
1174 		abuf = arc_loan_buf(spa, B_FALSE, blksz);
1175 		bcopy(db->db.db_data, abuf->b_data, blksz);
1176 	} else {
1177 		abuf = db->db_buf;
1178 		arc_loan_inuse_buf(abuf, db);
1179 		db->db_buf = NULL;
1180 		dbuf_clear_data(db);
1181 		mutex_exit(&db->db_mtx);
1182 	}
1183 	return (abuf);
1184 }
1185 
1186 /*
1187  * Calculate which level n block references the data at the level 0 offset
1188  * provided.
1189  */
1190 uint64_t
1191 dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
1192 {
1193 	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
1194 		/*
1195 		 * The level n blkid is equal to the level 0 blkid divided by
1196 		 * the number of level 0s in a level n block.
1197 		 *
1198 		 * The level 0 blkid is offset >> datablkshift =
1199 		 * offset / 2^datablkshift.
1200 		 *
1201 		 * The number of level 0s in a level n is the number of block
1202 		 * pointers in an indirect block, raised to the power of level.
1203 		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
1204 		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
1205 		 *
1206 		 * Thus, the level n blkid is: offset /
1207 		 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
1208 		 * = offset / 2^(datablkshift + level *
1209 		 *   (indblkshift - SPA_BLKPTRSHIFT))
1210 		 * = offset >> (datablkshift + level *
1211 		 *   (indblkshift - SPA_BLKPTRSHIFT))
1212 		 */
1213 
1214 		const unsigned exp = dn->dn_datablkshift +
1215 		    level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
1216 
1217 		if (exp >= 8 * sizeof (offset)) {
1218 			/* This only happens on the highest indirection level */
1219 			ASSERT3U(level, ==, dn->dn_nlevels - 1);
1220 			return (0);
1221 		}
1222 
1223 		ASSERT3U(exp, <, 8 * sizeof (offset));
1224 
1225 		return (offset >> exp);
1226 	} else {
1227 		ASSERT3U(offset, <, dn->dn_datablksz);
1228 		return (0);
1229 	}
1230 }
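/*
 * Worked example: with 128K data blocks (datablkshift = 17) and 128K
 * indirect blocks (indblkshift = 17), an indirect block holds
 * 2^(17 - SPA_BLKPTRSHIFT) = 1024 block pointers, so the level 1 blkid
 * for a given offset is offset >> (17 + 1 * 10) = offset >> 27.
 */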
1231 
1232 /*
1233  * This function is used to lock the parent of the provided dbuf. This should be
1234  * used when modifying or reading db_blkptr.
1235  */
1236 db_lock_type_t
1237 dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, void *tag)
1238 {
1239 	enum db_lock_type ret = DLT_NONE;
1240 	if (db->db_parent != NULL) {
1241 		rw_enter(&db->db_parent->db_rwlock, rw);
1242 		ret = DLT_PARENT;
1243 	} else if (dmu_objset_ds(db->db_objset) != NULL) {
1244 		rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
1245 		    tag);
1246 		ret = DLT_OBJSET;
1247 	}
1248 	/*
1249 	 * We only return a DLT_NONE lock when it's the top-most indirect block
1250 	 * of the meta-dnode of the MOS.
1251 	 */
1252 	return (ret);
1253 }
1254 
1255 /*
1256  * We need to pass the lock type in because it's possible that the block will
1257  * move from being the topmost indirect block in a dnode (and thus, have no
1258  * parent) to not the top-most via an indirection increase. This would cause a
1259  * panic if we didn't pass the lock type in.
1260  */
1261 void
1262 dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, void *tag)
1263 {
1264 	if (type == DLT_PARENT)
1265 		rw_exit(&db->db_parent->db_rwlock);
1266 	else if (type == DLT_OBJSET)
1267 		rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
1268 }
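/*
 * A minimal usage sketch for callers that need to read db_blkptr: take
 * the parent lock, remember the returned lock type, and hand it back to
 * the unlock (names as used elsewhere in this file):
 *
 *	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
 *	blkptr_t *bp = db->db_blkptr;
 *	...
 *	dmu_buf_unlock_parent(db, dblt, FTAG);
 */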
1269 
1270 static void
1271 dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
1272     arc_buf_t *buf, void *vdb)
1273 {
1274 	(void) zb, (void) bp;
1275 	dmu_buf_impl_t *db = vdb;
1276 
1277 	mutex_enter(&db->db_mtx);
1278 	ASSERT3U(db->db_state, ==, DB_READ);
1279 	/*
1280 	 * All reads are synchronous, so we must have a hold on the dbuf
1281 	 */
1282 	ASSERT(zfs_refcount_count(&db->db_holds) > 0);
1283 	ASSERT(db->db_buf == NULL);
1284 	ASSERT(db->db.db_data == NULL);
1285 	if (buf == NULL) {
1286 		/* i/o error */
1287 		ASSERT(zio == NULL || zio->io_error != 0);
1288 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1289 		ASSERT3P(db->db_buf, ==, NULL);
1290 		db->db_state = DB_UNCACHED;
1291 		DTRACE_SET_STATE(db, "i/o error");
1292 	} else if (db->db_level == 0 && db->db_freed_in_flight) {
1293 		/* freed in flight */
1294 		ASSERT(zio == NULL || zio->io_error == 0);
1295 		arc_release(buf, db);
1296 		bzero(buf->b_data, db->db.db_size);
1297 		arc_buf_freeze(buf);
1298 		db->db_freed_in_flight = FALSE;
1299 		dbuf_set_data(db, buf);
1300 		db->db_state = DB_CACHED;
1301 		DTRACE_SET_STATE(db, "freed in flight");
1302 	} else {
1303 		/* success */
1304 		ASSERT(zio == NULL || zio->io_error == 0);
1305 		dbuf_set_data(db, buf);
1306 		db->db_state = DB_CACHED;
1307 		DTRACE_SET_STATE(db, "successful read");
1308 	}
1309 	cv_broadcast(&db->db_changed);
1310 	dbuf_rele_and_unlock(db, NULL, B_FALSE);
1311 }
1312 
1313 /*
1314  * Shortcut for performing reads on bonus dbufs.  Returns
1315  * an error if we fail to verify the dnode associated with
1316  * a decrypted block. Otherwise success.
1317  */
1318 static int
1319 dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
1320 {
1321 	int bonuslen, max_bonuslen, err;
1322 
1323 	err = dbuf_read_verify_dnode_crypt(db, flags);
1324 	if (err)
1325 		return (err);
1326 
1327 	bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
1328 	max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1329 	ASSERT(MUTEX_HELD(&db->db_mtx));
1330 	ASSERT(DB_DNODE_HELD(db));
1331 	ASSERT3U(bonuslen, <=, db->db.db_size);
1332 	db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
1333 	arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
1334 	if (bonuslen < max_bonuslen)
1335 		bzero(db->db.db_data, max_bonuslen);
1336 	if (bonuslen)
1337 		bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
1338 	db->db_state = DB_CACHED;
1339 	DTRACE_SET_STATE(db, "bonus buffer filled");
1340 	return (0);
1341 }
1342 
1343 static void
1344 dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn)
1345 {
1346 	blkptr_t *bps = db->db.db_data;
1347 	uint32_t indbs = 1ULL << dn->dn_indblkshift;
1348 	int n_bps = indbs >> SPA_BLKPTRSHIFT;
1349 
1350 	for (int i = 0; i < n_bps; i++) {
1351 		blkptr_t *bp = &bps[i];
1352 
1353 		ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, indbs);
1354 		BP_SET_LSIZE(bp, BP_GET_LEVEL(db->db_blkptr) == 1 ?
1355 		    dn->dn_datablksz : BP_GET_LSIZE(db->db_blkptr));
1356 		BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr));
1357 		BP_SET_LEVEL(bp, BP_GET_LEVEL(db->db_blkptr) - 1);
1358 		BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0);
1359 	}
1360 }
1361 
1362 /*
1363  * Handle reads on dbufs that are holes, if necessary.  This function
1364  * requires that the dbuf's mutex is held. Returns success (0) if action
1365  * was taken, ENOENT if no action was taken.
1366  */
1367 static int
1368 dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn)
1369 {
1370 	ASSERT(MUTEX_HELD(&db->db_mtx));
1371 
1372 	int is_hole = db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr);
1373 	/*
1374 	 * For level 0 blocks only, if the above check fails:
1375 	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
1376 	 * processes the delete record and clears the bp while we are waiting
1377 	 * for the dn_mtx (resulting in a "no" from block_freed).
1378 	 */
1379 	if (!is_hole && db->db_level == 0) {
1380 		is_hole = dnode_block_freed(dn, db->db_blkid) ||
1381 		    BP_IS_HOLE(db->db_blkptr);
1382 	}
1383 
1384 	if (is_hole) {
1385 		dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1386 		bzero(db->db.db_data, db->db.db_size);
1387 
1388 		if (db->db_blkptr != NULL && db->db_level > 0 &&
1389 		    BP_IS_HOLE(db->db_blkptr) &&
1390 		    db->db_blkptr->blk_birth != 0) {
1391 			dbuf_handle_indirect_hole(db, dn);
1392 		}
1393 		db->db_state = DB_CACHED;
1394 		DTRACE_SET_STATE(db, "hole read satisfied");
1395 		return (0);
1396 	}
1397 	return (ENOENT);
1398 }
1399 
1400 /*
1401  * This function ensures that, when doing a decrypting read of a block,
1402  * we make sure we have decrypted the dnode associated with it. We must do
1403  * this so that we ensure we are fully authenticating the checksum-of-MACs
1404  * tree from the root of the objset down to this block. Indirect blocks are
1405  * always verified against their secure checksum-of-MACs assuming that the
1406  * dnode containing them is correct. Now that we are doing a decrypting read,
1407  * we can be sure that the key is loaded and verify that assumption. This is
1408  * especially important considering that we always read encrypted dnode
1409  * blocks as raw data (without verifying their MACs) to start, and
1410  * decrypt / authenticate them when we need to read an encrypted bonus buffer.
1411  */
1412 static int
1413 dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
1414 {
1415 	int err = 0;
1416 	objset_t *os = db->db_objset;
1417 	arc_buf_t *dnode_abuf;
1418 	dnode_t *dn;
1419 	zbookmark_phys_t zb;
1420 
1421 	ASSERT(MUTEX_HELD(&db->db_mtx));
1422 
1423 	if (!os->os_encrypted || os->os_raw_receive ||
1424 	    (flags & DB_RF_NO_DECRYPT) != 0)
1425 		return (0);
1426 
1427 	DB_DNODE_ENTER(db);
1428 	dn = DB_DNODE(db);
1429 	dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL;
1430 
1431 	if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) {
1432 		DB_DNODE_EXIT(db);
1433 		return (0);
1434 	}
1435 
1436 	SET_BOOKMARK(&zb, dmu_objset_id(os),
1437 	    DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
1438 	err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE);
1439 
1440 	/*
1441 	 * An error code of EACCES tells us that the key is still not
1442 	 * available. This is ok if we are only reading authenticated
1443 	 * (and therefore non-encrypted) blocks.
1444 	 */
1445 	if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
1446 	    !DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
1447 	    (db->db_blkid == DMU_BONUS_BLKID &&
1448 	    !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
1449 		err = 0;
1450 
1451 	DB_DNODE_EXIT(db);
1452 
1453 	return (err);
1454 }
1455 
1456 /*
1457  * Drops db_mtx and the parent lock specified by dblt and tag before
1458  * returning.
1459  */
1460 static int
1461 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags,
1462     db_lock_type_t dblt, void *tag)
1463 {
1464 	dnode_t *dn;
1465 	zbookmark_phys_t zb;
1466 	uint32_t aflags = ARC_FLAG_NOWAIT;
1467 	int err, zio_flags;
1468 	boolean_t bonus_read;
1469 
1470 	err = zio_flags = 0;
1471 	bonus_read = B_FALSE;
1472 	DB_DNODE_ENTER(db);
1473 	dn = DB_DNODE(db);
1474 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1475 	ASSERT(MUTEX_HELD(&db->db_mtx));
1476 	ASSERT(db->db_state == DB_UNCACHED);
1477 	ASSERT(db->db_buf == NULL);
1478 	ASSERT(db->db_parent == NULL ||
1479 	    RW_LOCK_HELD(&db->db_parent->db_rwlock));
1480 
1481 	if (db->db_blkid == DMU_BONUS_BLKID) {
1482 		err = dbuf_read_bonus(db, dn, flags);
1483 		goto early_unlock;
1484 	}
1485 
1486 	err = dbuf_read_hole(db, dn);
1487 	if (err == 0)
1488 		goto early_unlock;
1489 
1490 	/*
1491 	 * Any attempt to read a redacted block should result in an error. This
1492 	 * will never happen under normal conditions, but can be useful for
1493 	 * debugging purposes.
1494 	 */
1495 	if (BP_IS_REDACTED(db->db_blkptr)) {
1496 		ASSERT(dsl_dataset_feature_is_active(
1497 		    db->db_objset->os_dsl_dataset,
1498 		    SPA_FEATURE_REDACTED_DATASETS));
1499 		err = SET_ERROR(EIO);
1500 		goto early_unlock;
1501 	}
1502 
1503 	SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1504 	    db->db.db_object, db->db_level, db->db_blkid);
1505 
1506 	/*
1507 	 * All bps of an encrypted os should have the encryption bit set.
1508 	 * If this is not true it indicates tampering and we report an error.
1509 	 */
1510 	if (db->db_objset->os_encrypted && !BP_USES_CRYPT(db->db_blkptr)) {
1511 		spa_log_error(db->db_objset->os_spa, &zb);
1512 		zfs_panic_recover("unencrypted block in encrypted "
1513 		    "object set %llu", dmu_objset_id(db->db_objset));
1514 		err = SET_ERROR(EIO);
1515 		goto early_unlock;
1516 	}
1517 
1518 	err = dbuf_read_verify_dnode_crypt(db, flags);
1519 	if (err != 0)
1520 		goto early_unlock;
1521 
1522 	DB_DNODE_EXIT(db);
1523 
1524 	db->db_state = DB_READ;
1525 	DTRACE_SET_STATE(db, "read issued");
1526 	mutex_exit(&db->db_mtx);
1527 
1528 	if (DBUF_IS_L2CACHEABLE(db))
1529 		aflags |= ARC_FLAG_L2CACHE;
1530 
1531 	dbuf_add_ref(db, NULL);
1532 
1533 	zio_flags = (flags & DB_RF_CANFAIL) ?
1534 	    ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;
1535 
1536 	if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
1537 		zio_flags |= ZIO_FLAG_RAW;
1538 	/*
1539 	 * The zio layer will copy the provided blkptr later, but we need to
1540 	 * do this now so that we can release the parent's rwlock. We have to
1541 	 * do that now so that if dbuf_read_done is called synchronously (on
1542 	 * an l1 cache hit) we don't acquire the db_mtx while holding the
1543 	 * parent's rwlock, which would be a lock ordering violation.
1544 	 */
1545 	blkptr_t bp = *db->db_blkptr;
1546 	dmu_buf_unlock_parent(db, dblt, tag);
1547 	(void) arc_read(zio, db->db_objset->os_spa, &bp,
1548 	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
1549 	    &aflags, &zb);
1550 	return (err);
1551 early_unlock:
1552 	DB_DNODE_EXIT(db);
1553 	mutex_exit(&db->db_mtx);
1554 	dmu_buf_unlock_parent(db, dblt, tag);
1555 	return (err);
1556 }
1557 
1558 /*
1559  * This is our just-in-time copy function.  It makes a copy of buffers that
1560  * have been modified in a previous transaction group before we access them in
1561  * the current active group.
1562  *
1563  * This function is used in three places: when we are dirtying a buffer for the
1564  * first time in a txg, when we are freeing a range in a dnode that includes
1565  * this buffer, and when we are accessing a buffer which was received compressed
1566  * and later referenced in a WRITE_BYREF record.
1567  *
1568  * Note that when we are called from dbuf_free_range() we do not put a hold on
1569  * the buffer, we just traverse the active dbuf list for the dnode.
1570  */
1571 static void
1572 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
1573 {
1574 	dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
1575 
1576 	ASSERT(MUTEX_HELD(&db->db_mtx));
1577 	ASSERT(db->db.db_data != NULL);
1578 	ASSERT(db->db_level == 0);
1579 	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
1580 
1581 	if (dr == NULL ||
1582 	    (dr->dt.dl.dr_data !=
1583 	    ((db->db_blkid  == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
1584 		return;
1585 
1586 	/*
1587 	 * If the last dirty record for this dbuf has not yet synced
1588 	 * and it's referencing the dbuf data, either:
1589 	 *	reset the reference to point to a new copy,
1590 	 * or (if there are no active holders)
1591 	 *	just null out the current db_data pointer.
1592 	 */
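	/*
	 * Concretely (see the branches below): a bonus buffer is copied into
	 * a freshly kmem_alloc()ed buffer, a held arc buf is copied into a
	 * new arc buf that preserves the original's compression and
	 * encryption parameters, and otherwise (no holders beyond the dirty
	 * records) the dbuf's data pointer is simply cleared.
	 */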
1593 	ASSERT3U(dr->dr_txg, >=, txg - 2);
1594 	if (db->db_blkid == DMU_BONUS_BLKID) {
1595 		dnode_t *dn = DB_DNODE(db);
1596 		int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1597 		dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
1598 		arc_space_consume(bonuslen, ARC_SPACE_BONUS);
1599 		bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen);
1600 	} else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
1601 		dnode_t *dn = DB_DNODE(db);
1602 		int size = arc_buf_size(db->db_buf);
1603 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1604 		spa_t *spa = db->db_objset->os_spa;
1605 		enum zio_compress compress_type =
1606 		    arc_get_compression(db->db_buf);
1607 		uint8_t complevel = arc_get_complevel(db->db_buf);
1608 
1609 		if (arc_is_encrypted(db->db_buf)) {
1610 			boolean_t byteorder;
1611 			uint8_t salt[ZIO_DATA_SALT_LEN];
1612 			uint8_t iv[ZIO_DATA_IV_LEN];
1613 			uint8_t mac[ZIO_DATA_MAC_LEN];
1614 
1615 			arc_get_raw_params(db->db_buf, &byteorder, salt,
1616 			    iv, mac);
1617 			dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
1618 			    dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
1619 			    mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
1620 			    compress_type, complevel);
1621 		} else if (compress_type != ZIO_COMPRESS_OFF) {
1622 			ASSERT3U(type, ==, ARC_BUFC_DATA);
1623 			dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
1624 			    size, arc_buf_lsize(db->db_buf), compress_type,
1625 			    complevel);
1626 		} else {
1627 			dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
1628 		}
1629 		bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
1630 	} else {
1631 		db->db_buf = NULL;
1632 		dbuf_clear_data(db);
1633 	}
1634 }
1635 
1636 int
1637 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
1638 {
1639 	int err = 0;
1640 	boolean_t prefetch;
1641 	dnode_t *dn;
1642 
1643 	/*
1644 	 * We don't have to hold the mutex to check db_state because it
1645 	 * can't be freed while we have a hold on the buffer.
1646 	 */
1647 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1648 
1649 	if (db->db_state == DB_NOFILL)
1650 		return (SET_ERROR(EIO));
1651 
1652 	DB_DNODE_ENTER(db);
1653 	dn = DB_DNODE(db);
1654 
1655 	prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1656 	    (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
1657 	    DBUF_IS_CACHEABLE(db);
1658 
1659 	mutex_enter(&db->db_mtx);
1660 	if (db->db_state == DB_CACHED) {
1661 		spa_t *spa = dn->dn_objset->os_spa;
1662 
1663 		/*
1664 		 * Ensure that this block's dnode has been decrypted if
1665 		 * the caller has requested decrypted data.
1666 		 */
1667 		err = dbuf_read_verify_dnode_crypt(db, flags);
1668 
1669 		/*
1670 		 * If the arc buf is compressed or encrypted and the caller
1671 		 * requested uncompressed data, we need to untransform it
1672 		 * before returning. We also call arc_untransform() on any
1673 		 * unauthenticated blocks, which will verify their MAC if
1674 		 * the key is now available.
1675 		 */
1676 		if (err == 0 && db->db_buf != NULL &&
1677 		    (flags & DB_RF_NO_DECRYPT) == 0 &&
1678 		    (arc_is_encrypted(db->db_buf) ||
1679 		    arc_is_unauthenticated(db->db_buf) ||
1680 		    arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
1681 			zbookmark_phys_t zb;
1682 
1683 			SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1684 			    db->db.db_object, db->db_level, db->db_blkid);
1685 			dbuf_fix_old_data(db, spa_syncing_txg(spa));
1686 			err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
1687 			dbuf_set_data(db, db->db_buf);
1688 		}
1689 		mutex_exit(&db->db_mtx);
1690 		if (err == 0 && prefetch) {
1691 			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1692 			    B_FALSE, flags & DB_RF_HAVESTRUCT);
1693 		}
1694 		DB_DNODE_EXIT(db);
1695 		DBUF_STAT_BUMP(hash_hits);
1696 	} else if (db->db_state == DB_UNCACHED) {
1697 		spa_t *spa = dn->dn_objset->os_spa;
1698 		boolean_t need_wait = B_FALSE;
1699 
1700 		db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
1701 
1702 		if (zio == NULL &&
1703 		    db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
1704 			zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
1705 			need_wait = B_TRUE;
1706 		}
1707 		err = dbuf_read_impl(db, zio, flags, dblt, FTAG);
1708 		/*
1709 		 * dbuf_read_impl has dropped db_mtx and our parent's rwlock
1710 		 * for us
1711 		 */
1712 		if (!err && prefetch) {
1713 			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1714 			    db->db_state != DB_CACHED,
1715 			    flags & DB_RF_HAVESTRUCT);
1716 		}
1717 
1718 		DB_DNODE_EXIT(db);
1719 		DBUF_STAT_BUMP(hash_misses);
1720 
1721 		/*
1722 		 * If we created a zio_root we must execute it to avoid
1723 		 * leaking it, even if it isn't attached to any work due
1724 		 * to an error in dbuf_read_impl().
1725 		 */
1726 		if (need_wait) {
1727 			if (err == 0)
1728 				err = zio_wait(zio);
1729 			else
1730 				VERIFY0(zio_wait(zio));
1731 		}
1732 	} else {
1733 		/*
1734 		 * Another reader came in while the dbuf was in flight
1735 		 * between UNCACHED and CACHED.  Either a writer will finish
1736 		 * writing the buffer (sending the dbuf to CACHED) or the
1737 		 * first reader's request will reach the read_done callback
1738 		 * and send the dbuf to CACHED.  Otherwise, a failure
1739 		 * occurred and the dbuf went to UNCACHED.
1740 		 */
1741 		mutex_exit(&db->db_mtx);
1742 		if (prefetch) {
1743 			dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1744 			    B_TRUE, flags & DB_RF_HAVESTRUCT);
1745 		}
1746 		DB_DNODE_EXIT(db);
1747 		DBUF_STAT_BUMP(hash_misses);
1748 
1749 		/* Skip the wait per the caller's request. */
1750 		if ((flags & DB_RF_NEVERWAIT) == 0) {
1751 			mutex_enter(&db->db_mtx);
1752 			while (db->db_state == DB_READ ||
1753 			    db->db_state == DB_FILL) {
1754 				ASSERT(db->db_state == DB_READ ||
1755 				    (flags & DB_RF_HAVESTRUCT) == 0);
1756 				DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
1757 				    db, zio_t *, zio);
1758 				cv_wait(&db->db_changed, &db->db_mtx);
1759 			}
1760 			if (db->db_state == DB_UNCACHED)
1761 				err = SET_ERROR(EIO);
1762 			mutex_exit(&db->db_mtx);
1763 		}
1764 	}
1765 
1766 	return (err);
1767 }
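
/*
 * Illustrative sketch, not part of the build: a typical synchronous caller
 * takes a hold, issues the read with no parent zio (so for an on-disk block
 * dbuf_read() waits on an internally created zio_root), and drops the hold.
 * The flag choice here is an assumption about what such a caller might want,
 * not a description of any specific consumer:
 *
 *	dmu_buf_impl_t *db = dbuf_hold(dn, blkid, FTAG);
 *	if (db != NULL) {
 *		int err = dbuf_read(db, NULL,
 *		    DB_RF_CANFAIL | DB_RF_NOPREFETCH);
 *		if (err == 0) {
 *			... db->db.db_data is now valid (DB_CACHED) ...
 *		}
 *		dbuf_rele(db, FTAG);
 *	}
 */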
1768 
1769 static void
1770 dbuf_noread(dmu_buf_impl_t *db)
1771 {
1772 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1773 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1774 	mutex_enter(&db->db_mtx);
1775 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
1776 		cv_wait(&db->db_changed, &db->db_mtx);
1777 	if (db->db_state == DB_UNCACHED) {
1778 		ASSERT(db->db_buf == NULL);
1779 		ASSERT(db->db.db_data == NULL);
1780 		dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1781 		db->db_state = DB_FILL;
1782 		DTRACE_SET_STATE(db, "assigning filled buffer");
1783 	} else if (db->db_state == DB_NOFILL) {
1784 		dbuf_clear_data(db);
1785 	} else {
1786 		ASSERT3U(db->db_state, ==, DB_CACHED);
1787 	}
1788 	mutex_exit(&db->db_mtx);
1789 }
1790 
1791 void
1792 dbuf_unoverride(dbuf_dirty_record_t *dr)
1793 {
1794 	dmu_buf_impl_t *db = dr->dr_dbuf;
1795 	blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
1796 	uint64_t txg = dr->dr_txg;
1797 
1798 	ASSERT(MUTEX_HELD(&db->db_mtx));
1799 	/*
1800 	 * This assert is valid because dmu_sync() expects to be called by
1801 	 * a zilog's get_data while holding a range lock.  This call only
1802 	 * comes from dbuf_dirty() callers who must also hold a range lock.
1803 	 */
1804 	ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
1805 	ASSERT(db->db_level == 0);
1806 
1807 	if (db->db_blkid == DMU_BONUS_BLKID ||
1808 	    dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
1809 		return;
1810 
1811 	ASSERT(db->db_data_pending != dr);
1812 
1813 	/* free this block */
1814 	if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
1815 		zio_free(db->db_objset->os_spa, txg, bp);
1816 
1817 	dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1818 	dr->dt.dl.dr_nopwrite = B_FALSE;
1819 	dr->dt.dl.dr_has_raw_params = B_FALSE;
1820 
1821 	/*
1822 	 * Release the already-written buffer, so we leave it in
1823 	 * a consistent dirty state.  Note that all callers are
1824 	 * modifying the buffer, so they will immediately do
1825 	 * another (redundant) arc_release().  Therefore, leave
1826 	 * the buf thawed to save the effort of freezing &
1827 	 * immediately re-thawing it.
1828 	 */
1829 	arc_release(dr->dt.dl.dr_data, db);
1830 }
1831 
1832 /*
1833  * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
1834  * data blocks in the free range, so that any future readers will find
1835  * empty blocks.
1836  */
1837 void
1838 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
1839     dmu_tx_t *tx)
1840 {
1841 	dmu_buf_impl_t *db_search;
1842 	dmu_buf_impl_t *db, *db_next;
1843 	uint64_t txg = tx->tx_txg;
1844 	avl_index_t where;
1845 	dbuf_dirty_record_t *dr;
1846 
1847 	if (end_blkid > dn->dn_maxblkid &&
1848 	    !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
1849 		end_blkid = dn->dn_maxblkid;
1850 	dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid,
1851 	    (u_longlong_t)end_blkid);
1852 
1853 	db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
1854 	db_search->db_level = 0;
1855 	db_search->db_blkid = start_blkid;
1856 	db_search->db_state = DB_SEARCH;
1857 
1858 	mutex_enter(&dn->dn_dbufs_mtx);
1859 	db = avl_find(&dn->dn_dbufs, db_search, &where);
1860 	ASSERT3P(db, ==, NULL);
1861 
1862 	db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
1863 
1864 	for (; db != NULL; db = db_next) {
1865 		db_next = AVL_NEXT(&dn->dn_dbufs, db);
1866 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1867 
1868 		if (db->db_level != 0 || db->db_blkid > end_blkid) {
1869 			break;
1870 		}
1871 		ASSERT3U(db->db_blkid, >=, start_blkid);
1872 
1873 		/* found a level 0 buffer in the range */
1874 		mutex_enter(&db->db_mtx);
1875 		if (dbuf_undirty(db, tx)) {
1876 			/* mutex has been dropped and dbuf destroyed */
1877 			continue;
1878 		}
1879 
1880 		if (db->db_state == DB_UNCACHED ||
1881 		    db->db_state == DB_NOFILL ||
1882 		    db->db_state == DB_EVICTING) {
1883 			ASSERT(db->db.db_data == NULL);
1884 			mutex_exit(&db->db_mtx);
1885 			continue;
1886 		}
1887 		if (db->db_state == DB_READ || db->db_state == DB_FILL) {
1888 			/* will be handled in dbuf_read_done or dbuf_rele */
1889 			db->db_freed_in_flight = TRUE;
1890 			mutex_exit(&db->db_mtx);
1891 			continue;
1892 		}
1893 		if (zfs_refcount_count(&db->db_holds) == 0) {
1894 			ASSERT(db->db_buf);
1895 			dbuf_destroy(db);
1896 			continue;
1897 		}
1898 		/* The dbuf is referenced */
1899 
1900 		dr = list_head(&db->db_dirty_records);
1901 		if (dr != NULL) {
1902 			if (dr->dr_txg == txg) {
1903 				/*
1904 				 * This buffer is "in-use", re-adjust the file
1905 				 * size to reflect that this buffer may
1906 				 * contain new data when we sync.
1907 				 */
1908 				if (db->db_blkid != DMU_SPILL_BLKID &&
1909 				    db->db_blkid > dn->dn_maxblkid)
1910 					dn->dn_maxblkid = db->db_blkid;
1911 				dbuf_unoverride(dr);
1912 			} else {
1913 				/*
1914 				 * This dbuf is not dirty in the open context.
1915 				 * Either uncache it (if it's not referenced in
1916 				 * the open context) or reset its contents to
1917 				 * empty.
1918 				 */
1919 				dbuf_fix_old_data(db, txg);
1920 			}
1921 		}
1922 		/* clear the contents if it's cached */
1923 		if (db->db_state == DB_CACHED) {
1924 			ASSERT(db->db.db_data != NULL);
1925 			arc_release(db->db_buf, db);
1926 			rw_enter(&db->db_rwlock, RW_WRITER);
1927 			bzero(db->db.db_data, db->db.db_size);
1928 			rw_exit(&db->db_rwlock);
1929 			arc_buf_freeze(db->db_buf);
1930 		}
1931 
1932 		mutex_exit(&db->db_mtx);
1933 	}
1934 
1935 	kmem_free(db_search, sizeof (dmu_buf_impl_t));
1936 	mutex_exit(&dn->dn_dbufs_mtx);
1937 }
1938 
1939 void
1940 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
1941 {
1942 	arc_buf_t *buf, *old_buf;
1943 	dbuf_dirty_record_t *dr;
1944 	int osize = db->db.db_size;
1945 	arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1946 	dnode_t *dn;
1947 
1948 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1949 
1950 	DB_DNODE_ENTER(db);
1951 	dn = DB_DNODE(db);
1952 
1953 	/*
1954 	 * XXX we should be doing a dbuf_read, checking the return
1955 	 * value and returning that up to our callers
1956 	 */
1957 	dmu_buf_will_dirty(&db->db, tx);
1958 
1959 	/* create the data buffer for the new block */
1960 	buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
1961 
1962 	/* copy old block data to the new block */
1963 	old_buf = db->db_buf;
1964 	bcopy(old_buf->b_data, buf->b_data, MIN(osize, size));
1965 	/* zero the remainder */
1966 	if (size > osize)
1967 		bzero((uint8_t *)buf->b_data + osize, size - osize);
1968 
1969 	mutex_enter(&db->db_mtx);
1970 	dbuf_set_data(db, buf);
1971 	arc_buf_destroy(old_buf, db);
1972 	db->db.db_size = size;
1973 
1974 	dr = list_head(&db->db_dirty_records);
1975 	/* dirty record added by dmu_buf_will_dirty() */
1976 	VERIFY(dr != NULL);
1977 	if (db->db_level == 0)
1978 		dr->dt.dl.dr_data = buf;
1979 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
1980 	ASSERT3U(dr->dr_accounted, ==, osize);
1981 	dr->dr_accounted = size;
1982 	mutex_exit(&db->db_mtx);
1983 
1984 	dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
1985 	DB_DNODE_EXIT(db);
1986 }
1987 
1988 void
1989 dbuf_release_bp(dmu_buf_impl_t *db)
1990 {
1991 	objset_t *os __maybe_unused = db->db_objset;
1992 
1993 	ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
1994 	ASSERT(arc_released(os->os_phys_buf) ||
1995 	    list_link_active(&os->os_dsl_dataset->ds_synced_link));
1996 	ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
1997 
1998 	(void) arc_release(db->db_buf, db);
1999 }
2000 
2001 /*
2002  * We already have a dirty record for this TXG, and we are being
2003  * dirtied again.
2004  */
2005 static void
2006 dbuf_redirty(dbuf_dirty_record_t *dr)
2007 {
2008 	dmu_buf_impl_t *db = dr->dr_dbuf;
2009 
2010 	ASSERT(MUTEX_HELD(&db->db_mtx));
2011 
2012 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
2013 		/*
2014 		 * If this buffer has already been written out,
2015 		 * we now need to reset its state.
2016 		 */
2017 		dbuf_unoverride(dr);
2018 		if (db->db.db_object != DMU_META_DNODE_OBJECT &&
2019 		    db->db_state != DB_NOFILL) {
2020 			/* Already released on initial dirty, so just thaw. */
2021 			ASSERT(arc_released(db->db_buf));
2022 			arc_buf_thaw(db->db_buf);
2023 		}
2024 	}
2025 }
2026 
2027 dbuf_dirty_record_t *
2028 dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx)
2029 {
2030 	rw_enter(&dn->dn_struct_rwlock, RW_READER);
2031 	IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid);
2032 	dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE);
2033 	ASSERT(dn->dn_maxblkid >= blkid);
2034 
2035 	dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP);
2036 	list_link_init(&dr->dr_dirty_node);
2037 	list_link_init(&dr->dr_dbuf_node);
2038 	dr->dr_dnode = dn;
2039 	dr->dr_txg = tx->tx_txg;
2040 	dr->dt.dll.dr_blkid = blkid;
2041 	dr->dr_accounted = dn->dn_datablksz;
2042 
2043 	/*
2044 	 * There should not be any dbuf for the block that we're dirtying.
2045 	 * Otherwise the buffer contents could be inconsistent between the
2046 	 * dbuf and the lightweight dirty record.
2047 	 */
2048 	ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid));
2049 
2050 	mutex_enter(&dn->dn_mtx);
2051 	int txgoff = tx->tx_txg & TXG_MASK;
2052 	if (dn->dn_free_ranges[txgoff] != NULL) {
2053 		range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1);
2054 	}
2055 
2056 	if (dn->dn_nlevels == 1) {
2057 		ASSERT3U(blkid, <, dn->dn_nblkptr);
2058 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2059 		mutex_exit(&dn->dn_mtx);
2060 		rw_exit(&dn->dn_struct_rwlock);
2061 		dnode_setdirty(dn, tx);
2062 	} else {
2063 		mutex_exit(&dn->dn_mtx);
2064 
2065 		int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2066 		dmu_buf_impl_t *parent_db = dbuf_hold_level(dn,
2067 		    1, blkid >> epbs, FTAG);
2068 		rw_exit(&dn->dn_struct_rwlock);
2069 		if (parent_db == NULL) {
2070 			kmem_free(dr, sizeof (*dr));
2071 			return (NULL);
2072 		}
2073 		int err = dbuf_read(parent_db, NULL,
2074 		    (DB_RF_NOPREFETCH | DB_RF_CANFAIL));
2075 		if (err != 0) {
2076 			dbuf_rele(parent_db, FTAG);
2077 			kmem_free(dr, sizeof (*dr));
2078 			return (NULL);
2079 		}
2080 
2081 		dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx);
2082 		dbuf_rele(parent_db, FTAG);
2083 		mutex_enter(&parent_dr->dt.di.dr_mtx);
2084 		ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg);
2085 		list_insert_tail(&parent_dr->dt.di.dr_children, dr);
2086 		mutex_exit(&parent_dr->dt.di.dr_mtx);
2087 		dr->dr_parent = parent_dr;
2088 	}
2089 
2090 	dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx);
2091 
2092 	return (dr);
2093 }
2094 
2095 dbuf_dirty_record_t *
2096 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2097 {
2098 	dnode_t *dn;
2099 	objset_t *os;
2100 	dbuf_dirty_record_t *dr, *dr_next, *dr_head;
2101 	int txgoff = tx->tx_txg & TXG_MASK;
2102 	boolean_t drop_struct_rwlock = B_FALSE;
2103 
2104 	ASSERT(tx->tx_txg != 0);
2105 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2106 	DMU_TX_DIRTY_BUF(tx, db);
2107 
2108 	DB_DNODE_ENTER(db);
2109 	dn = DB_DNODE(db);
2110 	/*
2111 	 * Shouldn't dirty a regular buffer in syncing context.  Private
2112 	 * objects may be dirtied in syncing context, but only if they
2113 	 * were already pre-dirtied in open context.
2114 	 */
2115 #ifdef ZFS_DEBUG
2116 	if (dn->dn_objset->os_dsl_dataset != NULL) {
2117 		rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
2118 		    RW_READER, FTAG);
2119 	}
2120 	ASSERT(!dmu_tx_is_syncing(tx) ||
2121 	    BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
2122 	    DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2123 	    dn->dn_objset->os_dsl_dataset == NULL);
2124 	if (dn->dn_objset->os_dsl_dataset != NULL)
2125 		rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
2126 #endif
2127 	/*
2128 	 * We make this assert for private objects as well, but after we
2129 	 * check if we're already dirty.  They are allowed to re-dirty
2130 	 * in syncing context.
2131 	 */
2132 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
2133 	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2134 	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2135 
2136 	mutex_enter(&db->db_mtx);
2137 	/*
2138 	 * XXX make this true for indirects too?  The problem is that
2139 	 * transactions created with dmu_tx_create_assigned() from
2140 	 * syncing context don't bother holding ahead.
2141 	 */
2142 	ASSERT(db->db_level != 0 ||
2143 	    db->db_state == DB_CACHED || db->db_state == DB_FILL ||
2144 	    db->db_state == DB_NOFILL);
2145 
2146 	mutex_enter(&dn->dn_mtx);
2147 	dnode_set_dirtyctx(dn, tx, db);
2148 	if (tx->tx_txg > dn->dn_dirty_txg)
2149 		dn->dn_dirty_txg = tx->tx_txg;
2150 	mutex_exit(&dn->dn_mtx);
2151 
2152 	if (db->db_blkid == DMU_SPILL_BLKID)
2153 		dn->dn_have_spill = B_TRUE;
2154 
2155 	/*
2156 	 * If this buffer is already dirty, we're done.
2157 	 */
2158 	dr_head = list_head(&db->db_dirty_records);
2159 	ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg ||
2160 	    db->db.db_object == DMU_META_DNODE_OBJECT);
2161 	dr_next = dbuf_find_dirty_lte(db, tx->tx_txg);
2162 	if (dr_next && dr_next->dr_txg == tx->tx_txg) {
2163 		DB_DNODE_EXIT(db);
2164 
2165 		dbuf_redirty(dr_next);
2166 		mutex_exit(&db->db_mtx);
2167 		return (dr_next);
2168 	}
2169 
2170 	/*
2171 	 * Only valid if not already dirty.
2172 	 */
2173 	ASSERT(dn->dn_object == 0 ||
2174 	    dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2175 	    (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2176 
2177 	ASSERT3U(dn->dn_nlevels, >, db->db_level);
2178 
2179 	/*
2180 	 * We should only be dirtying in syncing context if it's the
2181 	 * mos or we're initializing the os or it's a special object.
2182 	 * However, we are allowed to dirty in syncing context provided
2183 	 * we already dirtied it in open context.  Hence we must make
2184 	 * this assertion only if we're not already dirty.
2185 	 */
2186 	os = dn->dn_objset;
2187 	VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
2188 #ifdef ZFS_DEBUG
2189 	if (dn->dn_objset->os_dsl_dataset != NULL)
2190 		rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
2191 	ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2192 	    os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
2193 	if (dn->dn_objset->os_dsl_dataset != NULL)
2194 		rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
2195 #endif
2196 	ASSERT(db->db.db_size != 0);
2197 
2198 	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2199 
2200 	if (db->db_blkid != DMU_BONUS_BLKID) {
2201 		dmu_objset_willuse_space(os, db->db.db_size, tx);
2202 	}
2203 
2204 	/*
2205 	 * If this buffer is dirty in an old transaction group we need
2206 	 * to make a copy of it so that the changes we make in this
2207 	 * transaction group won't leak out when we sync the older txg.
2208 	 */
2209 	dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
2210 	list_link_init(&dr->dr_dirty_node);
2211 	list_link_init(&dr->dr_dbuf_node);
2212 	dr->dr_dnode = dn;
2213 	if (db->db_level == 0) {
2214 		void *data_old = db->db_buf;
2215 
2216 		if (db->db_state != DB_NOFILL) {
2217 			if (db->db_blkid == DMU_BONUS_BLKID) {
2218 				dbuf_fix_old_data(db, tx->tx_txg);
2219 				data_old = db->db.db_data;
2220 			} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
2221 				/*
2222 				 * Release the data buffer from the cache so
2223 				 * that we can modify it without impacting
2224 				 * possible other users of this cached data
2225 				 * block.  Note that indirect blocks and
2226 				 * private objects are not released until the
2227 				 * syncing state (since they are only modified
2228 				 * then).
2229 				 */
2230 				arc_release(db->db_buf, db);
2231 				dbuf_fix_old_data(db, tx->tx_txg);
2232 				data_old = db->db_buf;
2233 			}
2234 			ASSERT(data_old != NULL);
2235 		}
2236 		dr->dt.dl.dr_data = data_old;
2237 	} else {
2238 		mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
2239 		list_create(&dr->dt.di.dr_children,
2240 		    sizeof (dbuf_dirty_record_t),
2241 		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
2242 	}
2243 	if (db->db_blkid != DMU_BONUS_BLKID)
2244 		dr->dr_accounted = db->db.db_size;
2245 	dr->dr_dbuf = db;
2246 	dr->dr_txg = tx->tx_txg;
2247 	list_insert_before(&db->db_dirty_records, dr_next, dr);
2248 
2249 	/*
2250 	 * We could have been freed_in_flight between the dbuf_noread
2251 	 * and dbuf_dirty.  We win, as though the dbuf_noread() had
2252 	 * happened after the free.
2253 	 */
2254 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
2255 	    db->db_blkid != DMU_SPILL_BLKID) {
2256 		mutex_enter(&dn->dn_mtx);
2257 		if (dn->dn_free_ranges[txgoff] != NULL) {
2258 			range_tree_clear(dn->dn_free_ranges[txgoff],
2259 			    db->db_blkid, 1);
2260 		}
2261 		mutex_exit(&dn->dn_mtx);
2262 		db->db_freed_in_flight = FALSE;
2263 	}
2264 
2265 	/*
2266 	 * This buffer is now part of this txg
2267 	 */
2268 	dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
2269 	db->db_dirtycnt += 1;
2270 	ASSERT3U(db->db_dirtycnt, <=, 3);
2271 
2272 	mutex_exit(&db->db_mtx);
2273 
2274 	if (db->db_blkid == DMU_BONUS_BLKID ||
2275 	    db->db_blkid == DMU_SPILL_BLKID) {
2276 		mutex_enter(&dn->dn_mtx);
2277 		ASSERT(!list_link_active(&dr->dr_dirty_node));
2278 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2279 		mutex_exit(&dn->dn_mtx);
2280 		dnode_setdirty(dn, tx);
2281 		DB_DNODE_EXIT(db);
2282 		return (dr);
2283 	}
2284 
2285 	if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
2286 		rw_enter(&dn->dn_struct_rwlock, RW_READER);
2287 		drop_struct_rwlock = B_TRUE;
2288 	}
2289 
2290 	/*
2291 	 * If we are overwriting a dedup BP, then unless it is snapshotted,
2292 	 * when we get to syncing context we will need to decrement its
2293 	 * refcount in the DDT.  Prefetch the relevant DDT block so that
2294 	 * syncing context won't have to wait for the i/o.
2295 	 */
2296 	if (db->db_blkptr != NULL) {
2297 		db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
2298 		ddt_prefetch(os->os_spa, db->db_blkptr);
2299 		dmu_buf_unlock_parent(db, dblt, FTAG);
2300 	}
2301 
2302 	/*
2303 	 * We need to hold the dn_struct_rwlock to make this assertion,
2304 	 * because it protects dn_phys / dn_next_nlevels from changing.
2305 	 */
2306 	ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
2307 	    dn->dn_phys->dn_nlevels > db->db_level ||
2308 	    dn->dn_next_nlevels[txgoff] > db->db_level ||
2309 	    dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
2310 	    dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
2311 
2312 
2313 	if (db->db_level == 0) {
2314 		ASSERT(!db->db_objset->os_raw_receive ||
2315 		    dn->dn_maxblkid >= db->db_blkid);
2316 		dnode_new_blkid(dn, db->db_blkid, tx,
2317 		    drop_struct_rwlock, B_FALSE);
2318 		ASSERT(dn->dn_maxblkid >= db->db_blkid);
2319 	}
2320 
2321 	if (db->db_level+1 < dn->dn_nlevels) {
2322 		dmu_buf_impl_t *parent = db->db_parent;
2323 		dbuf_dirty_record_t *di;
2324 		int parent_held = FALSE;
2325 
2326 		if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
2327 			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2328 			parent = dbuf_hold_level(dn, db->db_level + 1,
2329 			    db->db_blkid >> epbs, FTAG);
2330 			ASSERT(parent != NULL);
2331 			parent_held = TRUE;
2332 		}
2333 		if (drop_struct_rwlock)
2334 			rw_exit(&dn->dn_struct_rwlock);
2335 		ASSERT3U(db->db_level + 1, ==, parent->db_level);
2336 		di = dbuf_dirty(parent, tx);
2337 		if (parent_held)
2338 			dbuf_rele(parent, FTAG);
2339 
2340 		mutex_enter(&db->db_mtx);
2341 		/*
2342 		 * Since we've dropped the mutex, it's possible that
2343 		 * dbuf_undirty() might have changed this out from under us.
2344 		 */
2345 		if (list_head(&db->db_dirty_records) == dr ||
2346 		    dn->dn_object == DMU_META_DNODE_OBJECT) {
2347 			mutex_enter(&di->dt.di.dr_mtx);
2348 			ASSERT3U(di->dr_txg, ==, tx->tx_txg);
2349 			ASSERT(!list_link_active(&dr->dr_dirty_node));
2350 			list_insert_tail(&di->dt.di.dr_children, dr);
2351 			mutex_exit(&di->dt.di.dr_mtx);
2352 			dr->dr_parent = di;
2353 		}
2354 		mutex_exit(&db->db_mtx);
2355 	} else {
2356 		ASSERT(db->db_level + 1 == dn->dn_nlevels);
2357 		ASSERT(db->db_blkid < dn->dn_nblkptr);
2358 		ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
2359 		mutex_enter(&dn->dn_mtx);
2360 		ASSERT(!list_link_active(&dr->dr_dirty_node));
2361 		list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2362 		mutex_exit(&dn->dn_mtx);
2363 		if (drop_struct_rwlock)
2364 			rw_exit(&dn->dn_struct_rwlock);
2365 	}
2366 
2367 	dnode_setdirty(dn, tx);
2368 	DB_DNODE_EXIT(db);
2369 	return (dr);
2370 }
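
/*
 * The two common open-context paths into dbuf_dirty() appear below:
 * dmu_buf_will_dirty() performs a dbuf_read() before dirtying so the old
 * contents are present, while dmu_buf_will_fill() uses dbuf_noread()
 * because its caller intends to overwrite the entire block.
 */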
2371 
2372 static void
2373 dbuf_undirty_bonus(dbuf_dirty_record_t *dr)
2374 {
2375 	dmu_buf_impl_t *db = dr->dr_dbuf;
2376 
2377 	if (dr->dt.dl.dr_data != db->db.db_data) {
2378 		struct dnode *dn = dr->dr_dnode;
2379 		int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
2380 
2381 		kmem_free(dr->dt.dl.dr_data, max_bonuslen);
2382 		arc_space_return(max_bonuslen, ARC_SPACE_BONUS);
2383 	}
2384 	db->db_data_pending = NULL;
2385 	ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
2386 	list_remove(&db->db_dirty_records, dr);
2387 	if (dr->dr_dbuf->db_level != 0) {
2388 		mutex_destroy(&dr->dt.di.dr_mtx);
2389 		list_destroy(&dr->dt.di.dr_children);
2390 	}
2391 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
2392 	ASSERT3U(db->db_dirtycnt, >, 0);
2393 	db->db_dirtycnt -= 1;
2394 }
2395 
2396 /*
2397  * Undirty a buffer in the transaction group referenced by the given
2398  * transaction.  Return whether this evicted the dbuf.
2399  */
2400 static boolean_t
2401 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2402 {
2403 	uint64_t txg = tx->tx_txg;
2404 
2405 	ASSERT(txg != 0);
2406 
2407 	/*
2408 	 * Due to our use of dn_nlevels below, this can only be called
2409 	 * in open context, unless we are operating on the MOS.
2410 	 * From syncing context, dn_nlevels may be different from the
2411 	 * dn_nlevels used when dbuf was dirtied.
2412 	 */
2413 	ASSERT(db->db_objset ==
2414 	    dmu_objset_pool(db->db_objset)->dp_meta_objset ||
2415 	    txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
2416 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2417 	ASSERT0(db->db_level);
2418 	ASSERT(MUTEX_HELD(&db->db_mtx));
2419 
2420 	/*
2421 	 * If this buffer is not dirty, we're done.
2422 	 */
2423 	dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg);
2424 	if (dr == NULL)
2425 		return (B_FALSE);
2426 	ASSERT(dr->dr_dbuf == db);
2427 
2428 	dnode_t *dn = dr->dr_dnode;
2429 
2430 	dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2431 
2432 	ASSERT(db->db.db_size != 0);
2433 
2434 	dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
2435 	    dr->dr_accounted, txg);
2436 
2437 	list_remove(&db->db_dirty_records, dr);
2438 
2439 	/*
2440 	 * Note that there are three places in dbuf_dirty()
2441 	 * where this dirty record may be put on a list.
2442 	 * Make sure to do a list_remove corresponding to
2443 	 * every one of those list_insert calls.
2444 	 */
2445 	if (dr->dr_parent) {
2446 		mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
2447 		list_remove(&dr->dr_parent->dt.di.dr_children, dr);
2448 		mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
2449 	} else if (db->db_blkid == DMU_SPILL_BLKID ||
2450 	    db->db_level + 1 == dn->dn_nlevels) {
2451 		ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
2452 		mutex_enter(&dn->dn_mtx);
2453 		list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
2454 		mutex_exit(&dn->dn_mtx);
2455 	}
2456 
2457 	if (db->db_state != DB_NOFILL) {
2458 		dbuf_unoverride(dr);
2459 
2460 		ASSERT(db->db_buf != NULL);
2461 		ASSERT(dr->dt.dl.dr_data != NULL);
2462 		if (dr->dt.dl.dr_data != db->db_buf)
2463 			arc_buf_destroy(dr->dt.dl.dr_data, db);
2464 	}
2465 
2466 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
2467 
2468 	ASSERT(db->db_dirtycnt > 0);
2469 	db->db_dirtycnt -= 1;
2470 
2471 	if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
2472 		ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf));
2473 		dbuf_destroy(db);
2474 		return (B_TRUE);
2475 	}
2476 
2477 	return (B_FALSE);
2478 }
2479 
2480 static void
2481 dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
2482 {
2483 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2484 
2485 	ASSERT(tx->tx_txg != 0);
2486 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2487 
2488 	/*
2489 	 * Quick check for dirtiness.  For already dirty blocks, this
2490 	 * reduces runtime of this function by >90%, and overall performance
2491 	 * by 50% for some workloads (e.g. file deletion with indirect blocks
2492 	 * cached).
2493 	 */
2494 	mutex_enter(&db->db_mtx);
2495 
2496 	if (db->db_state == DB_CACHED) {
2497 		dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2498 		/*
2499 		 * It's possible that it is already dirty but not cached,
2500 		 * because there are some calls to dbuf_dirty() that don't
2501 		 * go through dmu_buf_will_dirty().
2502 		 */
2503 		if (dr != NULL) {
2504 			/* This dbuf is already dirty and cached. */
2505 			dbuf_redirty(dr);
2506 			mutex_exit(&db->db_mtx);
2507 			return;
2508 		}
2509 	}
2510 	mutex_exit(&db->db_mtx);
2511 
2512 	DB_DNODE_ENTER(db);
2513 	if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
2514 		flags |= DB_RF_HAVESTRUCT;
2515 	DB_DNODE_EXIT(db);
2516 	(void) dbuf_read(db, NULL, flags);
2517 	(void) dbuf_dirty(db, tx);
2518 }
2519 
2520 void
2521 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2522 {
2523 	dmu_buf_will_dirty_impl(db_fake,
2524 	    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
2525 }
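
/*
 * Illustrative sketch, not drawn from any specific caller: open-context
 * code that modifies an existing block inside an assigned transaction
 * typically declares the dirty intent first and then writes through the
 * user-visible dmu_buf_t:
 *
 *	dmu_buf_will_dirty(dbp, tx);
 *	bcopy(src, dbp->db_data, dbp->db_size);
 *
 * where dbp is a held dmu_buf_t and tx has already been assigned to a txg.
 */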
2526 
2527 boolean_t
2528 dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2529 {
2530 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2531 	dbuf_dirty_record_t *dr;
2532 
2533 	mutex_enter(&db->db_mtx);
2534 	dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2535 	mutex_exit(&db->db_mtx);
2536 	return (dr != NULL);
2537 }
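
/*
 * Hypothetical usage note: because this is a pure query it is convenient in
 * assertions, e.g. ASSERT(dmu_buf_is_dirty(dbp, tx)) before touching a
 * buffer the caller believes it has already dirtied in this transaction.
 */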
2538 
2539 void
2540 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2541 {
2542 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2543 
2544 	db->db_state = DB_NOFILL;
2545 	DTRACE_SET_STATE(db, "allocating NOFILL buffer");
2546 	dmu_buf_will_fill(db_fake, tx);
2547 }
2548 
2549 void
2550 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2551 {
2552 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2553 
2554 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2555 	ASSERT(tx->tx_txg != 0);
2556 	ASSERT(db->db_level == 0);
2557 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2558 
2559 	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
2560 	    dmu_tx_private_ok(tx));
2561 
2562 	dbuf_noread(db);
2563 	(void) dbuf_dirty(db, tx);
2564 }
2565 
2566 /*
2567  * This function is effectively the same as dmu_buf_will_dirty(), but
2568  * indicates the caller expects raw encrypted data in the db, and provides
2569  * the crypt params (byteorder, salt, iv, mac) which should be stored in the
2570  * blkptr_t when this dbuf is written.  This is only used for blocks of
2571  * dnodes, during raw receive.
2572  */
2573 void
2574 dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
2575     const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
2576 {
2577 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2578 	dbuf_dirty_record_t *dr;
2579 
2580 	/*
2581 	 * dr_has_raw_params is only processed for blocks of dnodes
2582 	 * (see dbuf_sync_dnode_leaf_crypt()).
2583 	 */
2584 	ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
2585 	ASSERT3U(db->db_level, ==, 0);
2586 	ASSERT(db->db_objset->os_raw_receive);
2587 
2588 	dmu_buf_will_dirty_impl(db_fake,
2589 	    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
2590 
2591 	dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2592 
2593 	ASSERT3P(dr, !=, NULL);
2594 
2595 	dr->dt.dl.dr_has_raw_params = B_TRUE;
2596 	dr->dt.dl.dr_byteorder = byteorder;
2597 	bcopy(salt, dr->dt.dl.dr_salt, ZIO_DATA_SALT_LEN);
2598 	bcopy(iv, dr->dt.dl.dr_iv, ZIO_DATA_IV_LEN);
2599 	bcopy(mac, dr->dt.dl.dr_mac, ZIO_DATA_MAC_LEN);
2600 }
2601 
2602 static void
2603 dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
2604 {
2605 	struct dirty_leaf *dl;
2606 	dbuf_dirty_record_t *dr;
2607 
2608 	dr = list_head(&db->db_dirty_records);
2609 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2610 	dl = &dr->dt.dl;
2611 	dl->dr_overridden_by = *bp;
2612 	dl->dr_override_state = DR_OVERRIDDEN;
2613 	dl->dr_overridden_by.blk_birth = dr->dr_txg;
2614 }
2615 
2616 void
2617 dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx)
2618 {
2619 	(void) tx;
2620 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2621 	dbuf_states_t old_state;
2622 	mutex_enter(&db->db_mtx);
2623 	DBUF_VERIFY(db);
2624 
2625 	old_state = db->db_state;
2626 	db->db_state = DB_CACHED;
2627 	if (old_state == DB_FILL) {
2628 		if (db->db_level == 0 && db->db_freed_in_flight) {
2629 			ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2630 			/* we were freed while filling */
2631 			/* XXX dbuf_undirty? */
2632 			bzero(db->db.db_data, db->db.db_size);
2633 			db->db_freed_in_flight = FALSE;
2634 			DTRACE_SET_STATE(db,
2635 			    "fill done handling freed in flight");
2636 		} else {
2637 			DTRACE_SET_STATE(db, "fill done");
2638 		}
2639 		cv_broadcast(&db->db_changed);
2640 	}
2641 	mutex_exit(&db->db_mtx);
2642 }
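
/*
 * Illustrative sketch of the fill protocol (hypothetical caller, not taken
 * from this file): a writer that will overwrite an entire block announces
 * the fill, copies in the new data, and then completes the fill so that
 * anyone waiting in DB_FILL is woken:
 *
 *	dmu_buf_will_fill(dbp, tx);
 *	bcopy(src, dbp->db_data, dbp->db_size);
 *	dmu_buf_fill_done(dbp, tx);
 */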
2643 
2644 void
2645 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
2646     bp_embedded_type_t etype, enum zio_compress comp,
2647     int uncompressed_size, int compressed_size, int byteorder,
2648     dmu_tx_t *tx)
2649 {
2650 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2651 	struct dirty_leaf *dl;
2652 	dmu_object_type_t type;
2653 	dbuf_dirty_record_t *dr;
2654 
2655 	if (etype == BP_EMBEDDED_TYPE_DATA) {
2656 		ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
2657 		    SPA_FEATURE_EMBEDDED_DATA));
2658 	}
2659 
2660 	DB_DNODE_ENTER(db);
2661 	type = DB_DNODE(db)->dn_type;
2662 	DB_DNODE_EXIT(db);
2663 
2664 	ASSERT0(db->db_level);
2665 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2666 
2667 	dmu_buf_will_not_fill(dbuf, tx);
2668 
2669 	dr = list_head(&db->db_dirty_records);
2670 	ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2671 	dl = &dr->dt.dl;
2672 	encode_embedded_bp_compressed(&dl->dr_overridden_by,
2673 	    data, comp, uncompressed_size, compressed_size);
2674 	BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
2675 	BP_SET_TYPE(&dl->dr_overridden_by, type);
2676 	BP_SET_LEVEL(&dl->dr_overridden_by, 0);
2677 	BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
2678 
2679 	dl->dr_override_state = DR_OVERRIDDEN;
2680 	dl->dr_overridden_by.blk_birth = dr->dr_txg;
2681 }
2682 
2683 void
2684 dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx)
2685 {
2686 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2687 	dmu_object_type_t type;
2688 	ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset,
2689 	    SPA_FEATURE_REDACTED_DATASETS));
2690 
2691 	DB_DNODE_ENTER(db);
2692 	type = DB_DNODE(db)->dn_type;
2693 	DB_DNODE_EXIT(db);
2694 
2695 	ASSERT0(db->db_level);
2696 	dmu_buf_will_not_fill(dbuf, tx);
2697 
2698 	blkptr_t bp = { { { {0} } } };
2699 	BP_SET_TYPE(&bp, type);
2700 	BP_SET_LEVEL(&bp, 0);
2701 	BP_SET_BIRTH(&bp, tx->tx_txg, 0);
2702 	BP_SET_REDACTED(&bp);
2703 	BPE_SET_LSIZE(&bp, dbuf->db_size);
2704 
2705 	dbuf_override_impl(db, &bp, tx);
2706 }
2707 
2708 /*
2709  * Directly assign a provided arc buf to a given dbuf if it's not referenced
2710  * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
2711  */
2712 void
2713 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
2714 {
2715 	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2716 	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2717 	ASSERT(db->db_level == 0);
2718 	ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
2719 	ASSERT(buf != NULL);
2720 	ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
2721 	ASSERT(tx->tx_txg != 0);
2722 
2723 	arc_return_buf(buf, db);
2724 	ASSERT(arc_released(buf));
2725 
2726 	mutex_enter(&db->db_mtx);
2727 
2728 	while (db->db_state == DB_READ || db->db_state == DB_FILL)
2729 		cv_wait(&db->db_changed, &db->db_mtx);
2730 
2731 	ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
2732 
2733 	if (db->db_state == DB_CACHED &&
2734 	    zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
2735 		/*
2736 		 * In practice, we will never have a case where we have an
2737 		 * encrypted arc buffer while additional holds exist on the
2738 		 * dbuf. We don't handle this here so we simply assert that
2739 		 * fact instead.
2740 		 */
2741 		ASSERT(!arc_is_encrypted(buf));
2742 		mutex_exit(&db->db_mtx);
2743 		(void) dbuf_dirty(db, tx);
2744 		bcopy(buf->b_data, db->db.db_data, db->db.db_size);
2745 		arc_buf_destroy(buf, db);
2746 		return;
2747 	}
2748 
2749 	if (db->db_state == DB_CACHED) {
2750 		dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
2751 
2752 		ASSERT(db->db_buf != NULL);
2753 		if (dr != NULL && dr->dr_txg == tx->tx_txg) {
2754 			ASSERT(dr->dt.dl.dr_data == db->db_buf);
2755 
2756 			if (!arc_released(db->db_buf)) {
2757 				ASSERT(dr->dt.dl.dr_override_state ==
2758 				    DR_OVERRIDDEN);
2759 				arc_release(db->db_buf, db);
2760 			}
2761 			dr->dt.dl.dr_data = buf;
2762 			arc_buf_destroy(db->db_buf, db);
2763 		} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
2764 			arc_release(db->db_buf, db);
2765 			arc_buf_destroy(db->db_buf, db);
2766 		}
2767 		db->db_buf = NULL;
2768 	}
2769 	ASSERT(db->db_buf == NULL);
2770 	dbuf_set_data(db, buf);
2771 	db->db_state = DB_FILL;
2772 	DTRACE_SET_STATE(db, "filling assigned arcbuf");
2773 	mutex_exit(&db->db_mtx);
2774 	(void) dbuf_dirty(db, tx);
2775 	dmu_buf_fill_done(&db->db, tx);
2776 }
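
/*
 * Illustrative sketch (assumes the arc_loan_buf() interface; not taken from
 * a specific caller): the usual way an arc buf arrives here is that a
 * higher-level writer loans a buffer from the ARC, fills it, and hands
 * ownership to the dbuf layer, avoiding a copy when no one else holds the
 * dbuf:
 *
 *	arc_buf_t *abuf = arc_loan_buf(spa, B_FALSE, length);
 *	... fill abuf->b_data with length bytes ...
 *	dbuf_assign_arcbuf(db, abuf, tx);
 */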
2777 
2778 void
2779 dbuf_destroy(dmu_buf_impl_t *db)
2780 {
2781 	dnode_t *dn;
2782 	dmu_buf_impl_t *parent = db->db_parent;
2783 	dmu_buf_impl_t *dndb;
2784 
2785 	ASSERT(MUTEX_HELD(&db->db_mtx));
2786 	ASSERT(zfs_refcount_is_zero(&db->db_holds));
2787 
2788 	if (db->db_buf != NULL) {
2789 		arc_buf_destroy(db->db_buf, db);
2790 		db->db_buf = NULL;
2791 	}
2792 
2793 	if (db->db_blkid == DMU_BONUS_BLKID) {
2794 		int slots = DB_DNODE(db)->dn_num_slots;
2795 		int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
2796 		if (db->db.db_data != NULL) {
2797 			kmem_free(db->db.db_data, bonuslen);
2798 			arc_space_return(bonuslen, ARC_SPACE_BONUS);
2799 			db->db_state = DB_UNCACHED;
2800 			DTRACE_SET_STATE(db, "buffer cleared");
2801 		}
2802 	}
2803 
2804 	dbuf_clear_data(db);
2805 
2806 	if (multilist_link_active(&db->db_cache_link)) {
2807 		ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
2808 		    db->db_caching_status == DB_DBUF_METADATA_CACHE);
2809 
2810 		multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
2811 		(void) zfs_refcount_remove_many(
2812 		    &dbuf_caches[db->db_caching_status].size,
2813 		    db->db.db_size, db);
2814 
2815 		if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
2816 			DBUF_STAT_BUMPDOWN(metadata_cache_count);
2817 		} else {
2818 			DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
2819 			DBUF_STAT_BUMPDOWN(cache_count);
2820 			DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
2821 			    db->db.db_size);
2822 		}
2823 		db->db_caching_status = DB_NO_CACHE;
2824 	}
2825 
2826 	ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
2827 	ASSERT(db->db_data_pending == NULL);
2828 	ASSERT(list_is_empty(&db->db_dirty_records));
2829 
2830 	db->db_state = DB_EVICTING;
2831 	DTRACE_SET_STATE(db, "buffer eviction started");
2832 	db->db_blkptr = NULL;
2833 
2834 	/*
2835 	 * Now that db_state is DB_EVICTING, nobody else can find this via
2836 	 * the hash table.  We can now drop db_mtx, which allows us to
2837 	 * acquire the dn_dbufs_mtx.
2838 	 */
2839 	mutex_exit(&db->db_mtx);
2840 
2841 	DB_DNODE_ENTER(db);
2842 	dn = DB_DNODE(db);
2843 	dndb = dn->dn_dbuf;
2844 	if (db->db_blkid != DMU_BONUS_BLKID) {
2845 		boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
2846 		if (needlock)
2847 			mutex_enter_nested(&dn->dn_dbufs_mtx,
2848 			    NESTED_SINGLE);
2849 		avl_remove(&dn->dn_dbufs, db);
2850 		membar_producer();
2851 		DB_DNODE_EXIT(db);
2852 		if (needlock)
2853 			mutex_exit(&dn->dn_dbufs_mtx);
2854 		/*
2855 		 * Decrementing the dbuf count means that the hold corresponding
2856 		 * to the removed dbuf is no longer discounted in dnode_move(),
2857 		 * so the dnode cannot be moved until after we release the hold.
2858 		 * The membar_producer() ensures visibility of the decremented
2859 		 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
2860 		 * release any lock.
2861 		 */
2862 		mutex_enter(&dn->dn_mtx);
2863 		dnode_rele_and_unlock(dn, db, B_TRUE);
2864 		db->db_dnode_handle = NULL;
2865 
2866 		dbuf_hash_remove(db);
2867 	} else {
2868 		DB_DNODE_EXIT(db);
2869 	}
2870 
2871 	ASSERT(zfs_refcount_is_zero(&db->db_holds));
2872 
2873 	db->db_parent = NULL;
2874 
2875 	ASSERT(db->db_buf == NULL);
2876 	ASSERT(db->db.db_data == NULL);
2877 	ASSERT(db->db_hash_next == NULL);
2878 	ASSERT(db->db_blkptr == NULL);
2879 	ASSERT(db->db_data_pending == NULL);
2880 	ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
2881 	ASSERT(!multilist_link_active(&db->db_cache_link));
2882 
2883 	kmem_cache_free(dbuf_kmem_cache, db);
2884 	arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
2885 
2886 	/*
2887 	 * If this dbuf is referenced from an indirect dbuf,
2888 	 * decrement the ref count on the indirect dbuf.
2889 	 */
2890 	if (parent && parent != dndb) {
2891 		mutex_enter(&parent->db_mtx);
2892 		dbuf_rele_and_unlock(parent, db, B_TRUE);
2893 	}
2894 }
2895 
2896 /*
2897  * Note: While bpp will always be updated if the function returns success,
2898  * parentp will not be updated if the dnode does not have dn_dbuf filled in;
2899  * this happens when the dnode is the meta-dnode, or {user|group|project}used
2900  * object.
2901  */
2902 __attribute__((always_inline))
2903 static inline int
2904 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
2905     dmu_buf_impl_t **parentp, blkptr_t **bpp)
2906 {
2907 	*parentp = NULL;
2908 	*bpp = NULL;
2909 
2910 	ASSERT(blkid != DMU_BONUS_BLKID);
2911 
2912 	if (blkid == DMU_SPILL_BLKID) {
2913 		mutex_enter(&dn->dn_mtx);
2914 		if (dn->dn_have_spill &&
2915 		    (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
2916 			*bpp = DN_SPILL_BLKPTR(dn->dn_phys);
2917 		else
2918 			*bpp = NULL;
2919 		dbuf_add_ref(dn->dn_dbuf, NULL);
2920 		*parentp = dn->dn_dbuf;
2921 		mutex_exit(&dn->dn_mtx);
2922 		return (0);
2923 	}
2924 
2925 	int nlevels =
2926 	    (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
2927 	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2928 
2929 	ASSERT3U(level * epbs, <, 64);
2930 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2931 	/*
2932 	 * This assertion shouldn't trip as long as the max indirect block size
2933 	 * is less than 1M.  The reason for this is that up to that point,
2934 	 * the number of levels required to address an entire object with blocks
2935 	 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64.	 In
2936 	 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55
2937 	 * (i.e. we can address the entire object), objects will all use at most
2938 	 * N-1 levels and the assertion won't overflow.	 However, once epbs is
2939 	 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66.  Then, 4 levels will not be
2940 	 * enough to address an entire object, so objects will have 5 levels,
2941 	 * but then this assertion will overflow.
2942 	 *
2943 	 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
2944 	 * need to redo this logic to handle overflows.
2945 	 */
2946 	ASSERT(level >= nlevels ||
2947 	    ((nlevels - level - 1) * epbs) +
2948 	    highbit64(dn->dn_phys->dn_nblkptr) <= 64);
2949 	if (level >= nlevels ||
2950 	    blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
2951 	    ((nlevels - level - 1) * epbs)) ||
2952 	    (fail_sparse &&
2953 	    blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
2954 		/* the buffer has no parent yet */
2955 		return (SET_ERROR(ENOENT));
2956 	} else if (level < nlevels-1) {
2957 		/* this block is referenced from an indirect block */
2958 		int err;
2959 
2960 		err = dbuf_hold_impl(dn, level + 1,
2961 		    blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
2962 
2963 		if (err)
2964 			return (err);
2965 		err = dbuf_read(*parentp, NULL,
2966 		    (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
2967 		if (err) {
2968 			dbuf_rele(*parentp, NULL);
2969 			*parentp = NULL;
2970 			return (err);
2971 		}
2972 		rw_enter(&(*parentp)->db_rwlock, RW_READER);
2973 		*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
2974 		    (blkid & ((1ULL << epbs) - 1));
2975 		if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
2976 			ASSERT(BP_IS_HOLE(*bpp));
2977 		rw_exit(&(*parentp)->db_rwlock);
2978 		return (0);
2979 	} else {
2980 		/* the block is referenced from the dnode */
2981 		ASSERT3U(level, ==, nlevels-1);
2982 		ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
2983 		    blkid < dn->dn_phys->dn_nblkptr);
2984 		if (dn->dn_dbuf) {
2985 			dbuf_add_ref(dn->dn_dbuf, NULL);
2986 			*parentp = dn->dn_dbuf;
2987 		}
2988 		*bpp = &dn->dn_phys->dn_blkptr[blkid];
2989 		return (0);
2990 	}
2991 }
2992 
2993 static dmu_buf_impl_t *
2994 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
2995     dmu_buf_impl_t *parent, blkptr_t *blkptr)
2996 {
2997 	objset_t *os = dn->dn_objset;
2998 	dmu_buf_impl_t *db, *odb;
2999 
3000 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3001 	ASSERT(dn->dn_type != DMU_OT_NONE);
3002 
3003 	db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
3004 
3005 	list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t),
3006 	    offsetof(dbuf_dirty_record_t, dr_dbuf_node));
3007 
3008 	db->db_objset = os;
3009 	db->db.db_object = dn->dn_object;
3010 	db->db_level = level;
3011 	db->db_blkid = blkid;
3012 	db->db_dirtycnt = 0;
3013 	db->db_dnode_handle = dn->dn_handle;
3014 	db->db_parent = parent;
3015 	db->db_blkptr = blkptr;
3016 
3017 	db->db_user = NULL;
3018 	db->db_user_immediate_evict = FALSE;
3019 	db->db_freed_in_flight = FALSE;
3020 	db->db_pending_evict = FALSE;
3021 
3022 	if (blkid == DMU_BONUS_BLKID) {
3023 		ASSERT3P(parent, ==, dn->dn_dbuf);
3024 		db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
3025 		    (dn->dn_nblkptr-1) * sizeof (blkptr_t);
3026 		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
3027 		db->db.db_offset = DMU_BONUS_BLKID;
3028 		db->db_state = DB_UNCACHED;
3029 		DTRACE_SET_STATE(db, "bonus buffer created");
3030 		db->db_caching_status = DB_NO_CACHE;
3031 		/* the bonus dbuf is not placed in the hash table */
3032 		arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3033 		return (db);
3034 	} else if (blkid == DMU_SPILL_BLKID) {
3035 		db->db.db_size = (blkptr != NULL) ?
3036 		    BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
3037 		db->db.db_offset = 0;
3038 	} else {
3039 		int blocksize =
3040 		    db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
3041 		db->db.db_size = blocksize;
3042 		db->db.db_offset = db->db_blkid * blocksize;
3043 	}
3044 
3045 	/*
3046 	 * Hold the dn_dbufs_mtx while we get the new dbuf
3047 	 * in the hash table *and* added to the dbufs list.
3048 	 * This prevents a possible deadlock with someone
3049 	 * trying to look up this dbuf before it's added to the
3050 	 * dn_dbufs list.
3051 	 */
3052 	mutex_enter(&dn->dn_dbufs_mtx);
3053 	db->db_state = DB_EVICTING; /* not worth logging this state change */
3054 	if ((odb = dbuf_hash_insert(db)) != NULL) {
3055 		/* someone else inserted it first */
3056 		mutex_exit(&dn->dn_dbufs_mtx);
3057 		kmem_cache_free(dbuf_kmem_cache, db);
3058 		DBUF_STAT_BUMP(hash_insert_race);
3059 		return (odb);
3060 	}
3061 	avl_add(&dn->dn_dbufs, db);
3062 
3063 	db->db_state = DB_UNCACHED;
3064 	DTRACE_SET_STATE(db, "regular buffer created");
3065 	db->db_caching_status = DB_NO_CACHE;
3066 	mutex_exit(&dn->dn_dbufs_mtx);
3067 	arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3068 
3069 	if (parent && parent != dn->dn_dbuf)
3070 		dbuf_add_ref(parent, db);
3071 
3072 	ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
3073 	    zfs_refcount_count(&dn->dn_holds) > 0);
3074 	(void) zfs_refcount_add(&dn->dn_holds, db);
3075 
3076 	dprintf_dbuf(db, "db=%p\n", db);
3077 
3078 	return (db);
3079 }
3080 
3081 /*
3082  * This function returns a block pointer and information about the object,
3083  * given a dnode and a block.  This is a publicly accessible version of
3084  * dbuf_findbp that only returns some information, rather than the
3085  * dbuf.  Note that the dnode passed in must be held, and the dn_struct_rwlock
3086  * should be locked as (at least) a reader.
3087  */
3088 int
3089 dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
3090     blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift)
3091 {
3092 	dmu_buf_impl_t *dbp = NULL;
3093 	blkptr_t *bp2;
3094 	int err = 0;
3095 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3096 
3097 	err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2);
3098 	if (err == 0) {
3099 		*bp = *bp2;
3100 		if (dbp != NULL)
3101 			dbuf_rele(dbp, NULL);
3102 		if (datablkszsec != NULL)
3103 			*datablkszsec = dn->dn_phys->dn_datablkszsec;
3104 		if (indblkshift != NULL)
3105 			*indblkshift = dn->dn_phys->dn_indblkshift;
3106 	}
3107 
3108 	return (err);
3109 }
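
/*
 * Illustrative sketch (hypothetical caller): per the comment above, the
 * dnode must be held and dn_struct_rwlock held at least as reader.
 * Passing NULL for datablkszsec and indblkshift is allowed when only the
 * block pointer is of interest:
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	err = dbuf_dnode_findbp(dn, 0, blkid, &bp, NULL, NULL);
 *	rw_exit(&dn->dn_struct_rwlock);
 *	if (err == 0 && !BP_IS_HOLE(&bp))
 *		... the block exists on disk ...
 */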
3110 
3111 typedef struct dbuf_prefetch_arg {
3112 	spa_t *dpa_spa;	/* The spa to issue the prefetch in. */
3113 	zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
3114 	int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
3115 	int dpa_curlevel; /* The current level that we're reading */
3116 	dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
3117 	zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
3118 	zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
3119 	arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
3120 	dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */
3121 	void *dpa_arg; /* prefetch completion arg */
3122 } dbuf_prefetch_arg_t;
3123 
3124 static void
3125 dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done)
3126 {
3127 	if (dpa->dpa_cb != NULL)
3128 		dpa->dpa_cb(dpa->dpa_arg, io_done);
3129 	kmem_free(dpa, sizeof (*dpa));
3130 }
3131 
3132 static void
3133 dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb,
3134     const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3135 {
3136 	(void) zio, (void) zb, (void) iobp;
3137 	dbuf_prefetch_arg_t *dpa = private;
3138 
3139 	dbuf_prefetch_fini(dpa, B_TRUE);
3140 	if (abuf != NULL)
3141 		arc_buf_destroy(abuf, private);
3142 }
3143 
3144 /*
3145  * Actually issue the prefetch read for the block given.
3146  */
3147 static void
3148 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
3149 {
3150 	ASSERT(!BP_IS_REDACTED(bp) ||
3151 	    dsl_dataset_feature_is_active(
3152 	    dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3153 	    SPA_FEATURE_REDACTED_DATASETS));
3154 
3155 	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
3156 		return (dbuf_prefetch_fini(dpa, B_FALSE));
3157 
3158 	int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
3159 	arc_flags_t aflags =
3160 	    dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
3161 	    ARC_FLAG_NO_BUF;
3162 
3163 	/* dnodes are always read as raw and then converted later */
3164 	if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
3165 	    dpa->dpa_curlevel == 0)
3166 		zio_flags |= ZIO_FLAG_RAW;
3167 
3168 	ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3169 	ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
3170 	ASSERT(dpa->dpa_zio != NULL);
3171 	(void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp,
3172 	    dbuf_issue_final_prefetch_done, dpa,
3173 	    dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
3174 }
3175 
3176 /*
3177  * Called when an indirect block above our prefetch target is read in.  This
3178  * will either read in the next indirect block down the tree or issue the actual
3179  * prefetch if the next block down is our target.
3180  */
3181 static void
3182 dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
3183     const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3184 {
3185 	(void) zb, (void) iobp;
3186 	dbuf_prefetch_arg_t *dpa = private;
3187 
3188 	ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
3189 	ASSERT3S(dpa->dpa_curlevel, >, 0);
3190 
3191 	if (abuf == NULL) {
3192 		ASSERT(zio == NULL || zio->io_error != 0);
3193 		return (dbuf_prefetch_fini(dpa, B_TRUE));
3194 	}
3195 	ASSERT(zio == NULL || zio->io_error == 0);
3196 
3197 	/*
3198 	 * The dpa_dnode is only valid if we are called with a NULL
3199 	 * zio. This indicates that the arc_read() returned without
3200 	 * first calling zio_read() to issue a physical read. Once
3201 	 * a physical read is made the dpa_dnode must be invalidated
3202 	 * as the locks guarding it may have been dropped. If the
3203 	 * dpa_dnode is still valid, then we want to add it to the dbuf
3204 	 * cache. To do so, we must hold the dbuf associated with the block
3205 	 * we just prefetched, read its contents so that we associate it
3206 	 * with an arc_buf_t, and then release it.
3207 	 */
3208 	if (zio != NULL) {
3209 		ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
3210 		if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
3211 			ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
3212 		} else {
3213 			ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
3214 		}
3215 		ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
3216 
3217 		dpa->dpa_dnode = NULL;
3218 	} else if (dpa->dpa_dnode != NULL) {
3219 		uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
3220 		    (dpa->dpa_epbs * (dpa->dpa_curlevel -
3221 		    dpa->dpa_zb.zb_level));
3222 		dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
3223 		    dpa->dpa_curlevel, curblkid, FTAG);
3224 		if (db == NULL) {
3225 			arc_buf_destroy(abuf, private);
3226 			return (dbuf_prefetch_fini(dpa, B_TRUE));
3227 		}
3228 		(void) dbuf_read(db, NULL,
3229 		    DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
3230 		dbuf_rele(db, FTAG);
3231 	}
3232 
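	/*
	 * Step down one level and locate, within the indirect block we just
	 * read, the block pointer on the path to the target block.
	 */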
3233 	dpa->dpa_curlevel--;
3234 	uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
3235 	    (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
3236 	blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
3237 	    P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
3238 
3239 	ASSERT(!BP_IS_REDACTED(bp) ||
3240 	    dsl_dataset_feature_is_active(
3241 	    dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3242 	    SPA_FEATURE_REDACTED_DATASETS));
3243 	if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
3244 		dbuf_prefetch_fini(dpa, B_TRUE);
3245 	} else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
3246 		ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
3247 		dbuf_issue_final_prefetch(dpa, bp);
3248 	} else {
3249 		arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3250 		zbookmark_phys_t zb;
3251 
3252 		/* flag if L2ARC eligible, l2arc_noprefetch then decides */
3253 		if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
3254 			iter_aflags |= ARC_FLAG_L2CACHE;
3255 
3256 		ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3257 
3258 		SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
3259 		    dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
3260 
3261 		(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3262 		    bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio,
3263 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3264 		    &iter_aflags, &zb);
3265 	}
3266 
3267 	arc_buf_destroy(abuf, private);
3268 }
3269 
3270 /*
3271  * Issue prefetch reads for the given block on the given level.  If the indirect
3272  * blocks above that block are not in memory, we will read them in
3273  * asynchronously.  As a result, this call never blocks waiting for a read to
3274  * complete. Note that the prefetch might fail if the dataset is encrypted and
3275  * the encryption key is unmapped before the IO completes.
3276  */
3277 int
3278 dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid,
3279     zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
3280     void *arg)
3281 {
3282 	blkptr_t bp;
3283 	int epbs, nlevels, curlevel;
3284 	uint64_t curblkid;
3285 
3286 	ASSERT(blkid != DMU_BONUS_BLKID);
3287 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3288 
3289 	if (blkid > dn->dn_maxblkid)
3290 		goto no_issue;
3291 
3292 	if (level == 0 && dnode_block_freed(dn, blkid))
3293 		goto no_issue;
3294 
3295 	/*
3296 	 * This dnode hasn't been written to disk yet, so there's nothing to
3297 	 * prefetch.
3298 	 */
3299 	nlevels = dn->dn_phys->dn_nlevels;
3300 	if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
3301 		goto no_issue;
3302 
3303 	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
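	/*
	 * blkid << (epbs * level) is the first level-0 block covered by the
	 * requested block; don't prefetch past the end of the object on disk.
	 */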
3304 	if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
3305 		goto no_issue;
3306 
3307 	dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
3308 	    level, blkid);
3309 	if (db != NULL) {
3310 		mutex_exit(&db->db_mtx);
3311 		/*
3312 		 * This dbuf already exists.  It is either CACHED, or
3313 		 * (we assume) about to be read or filled.
3314 		 */
3315 		goto no_issue;
3316 	}
3317 
3318 	/*
3319 	 * Find the closest ancestor (indirect block) of the target block
3320 	 * that is present in the cache.  In this indirect block, we will
3321 	 * find the bp that is at curlevel, curblkid.
3322 	 */
3323 	curlevel = level;
3324 	curblkid = blkid;
3325 	while (curlevel < nlevels - 1) {
3326 		int parent_level = curlevel + 1;
3327 		uint64_t parent_blkid = curblkid >> epbs;
3328 		dmu_buf_impl_t *db;
3329 
3330 		if (dbuf_hold_impl(dn, parent_level, parent_blkid,
3331 		    FALSE, TRUE, FTAG, &db) == 0) {
3332 			blkptr_t *bpp = db->db_buf->b_data;
3333 			bp = bpp[P2PHASE(curblkid, 1 << epbs)];
3334 			dbuf_rele(db, FTAG);
3335 			break;
3336 		}
3337 
3338 		curlevel = parent_level;
3339 		curblkid = parent_blkid;
3340 	}
3341 
3342 	if (curlevel == nlevels - 1) {
3343 		/* No cached indirect blocks found. */
3344 		ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
3345 		bp = dn->dn_phys->dn_blkptr[curblkid];
3346 	}
3347 	ASSERT(!BP_IS_REDACTED(&bp) ||
3348 	    dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset,
3349 	    SPA_FEATURE_REDACTED_DATASETS));
3350 	if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp))
3351 		goto no_issue;
3352 
3353 	ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
3354 
3355 	zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
3356 	    ZIO_FLAG_CANFAIL);
3357 
3358 	dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
3359 	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
3360 	SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3361 	    dn->dn_object, level, blkid);
3362 	dpa->dpa_curlevel = curlevel;
3363 	dpa->dpa_prio = prio;
3364 	dpa->dpa_aflags = aflags;
3365 	dpa->dpa_spa = dn->dn_objset->os_spa;
3366 	dpa->dpa_dnode = dn;
3367 	dpa->dpa_epbs = epbs;
3368 	dpa->dpa_zio = pio;
3369 	dpa->dpa_cb = cb;
3370 	dpa->dpa_arg = arg;
3371 
3372 	/* flag if L2ARC eligible, l2arc_noprefetch then decides */
3373 	if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level))
3374 		dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
3375 
3376 	/*
3377 	 * If we have the indirect just above us, no need to do the asynchronous
3378 	 * prefetch chain; we'll just run the last step ourselves.  If we're at
3379 	 * a higher level, though, we want to issue the prefetches for all the
3380 	 * indirect blocks asynchronously, so we can go on with whatever we were
3381 	 * doing.
3382 	 */
3383 	if (curlevel == level) {
3384 		ASSERT3U(curblkid, ==, blkid);
3385 		dbuf_issue_final_prefetch(dpa, &bp);
3386 	} else {
3387 		arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3388 		zbookmark_phys_t zb;
3389 
3390 		/* flag if L2ARC eligible, l2arc_noprefetch then decides */
3391 		if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level))
3392 			iter_aflags |= ARC_FLAG_L2CACHE;
3393 
3394 		SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3395 		    dn->dn_object, curlevel, curblkid);
3396 		(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3397 		    &bp, dbuf_prefetch_indirect_done, dpa, prio,
3398 		    ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3399 		    &iter_aflags, &zb);
3400 	}
3401 	/*
3402 	 * We use pio here instead of dpa_zio since it's possible that
3403 	 * dpa may have already been freed.
3404 	 */
3405 	zio_nowait(pio);
3406 	return (1);
3407 no_issue:
3408 	if (cb != NULL)
3409 		cb(arg, B_FALSE);
3410 	return (0);
3411 }
3412 
3413 int
3414 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
3415     arc_flags_t aflags)
3416 {
3417 
3418 	return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL));
3419 }
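
/*
 * Illustrative usage sketch (not taken from any particular caller): code that
 * wants to warm the cache for a contiguous range of level-0 blocks could,
 * while holding dn_struct_rwlock as reader, do something like the following
 * ("first" and "last" are hypothetical bounds):
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	for (uint64_t blk = first; blk <= last; blk++)
 *		(void) dbuf_prefetch(dn, 0, blk, ZIO_PRIORITY_ASYNC_READ, 0);
 *	rw_exit(&dn->dn_struct_rwlock);
 *
 * The return value only reports whether a prefetch was issued (1) or not (0);
 * it says nothing about whether the data will be resident later.
 */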
3420 
3421 /*
3422  * Helper function for dbuf_hold_impl() to copy a buffer. Handles
3423  * the case of encrypted, compressed and uncompressed buffers by
3424  * allocating the new buffer, respectively, with arc_alloc_raw_buf(),
3425  * arc_alloc_compressed_buf() or arc_alloc_buf().
3426  *
3427  * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl().
3428  */
3429 noinline static void
3430 dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
3431 {
3432 	dbuf_dirty_record_t *dr = db->db_data_pending;
3433 	arc_buf_t *data = dr->dt.dl.dr_data;
3434 	enum zio_compress compress_type = arc_get_compression(data);
3435 	uint8_t complevel = arc_get_complevel(data);
3436 
3437 	if (arc_is_encrypted(data)) {
3438 		boolean_t byteorder;
3439 		uint8_t salt[ZIO_DATA_SALT_LEN];
3440 		uint8_t iv[ZIO_DATA_IV_LEN];
3441 		uint8_t mac[ZIO_DATA_MAC_LEN];
3442 
3443 		arc_get_raw_params(data, &byteorder, salt, iv, mac);
3444 		dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
3445 		    dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
3446 		    dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
3447 		    compress_type, complevel));
3448 	} else if (compress_type != ZIO_COMPRESS_OFF) {
3449 		dbuf_set_data(db, arc_alloc_compressed_buf(
3450 		    dn->dn_objset->os_spa, db, arc_buf_size(data),
3451 		    arc_buf_lsize(data), compress_type, complevel));
3452 	} else {
3453 		dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
3454 		    DBUF_GET_BUFC_TYPE(db), db->db.db_size));
3455 	}
3456 
3457 	rw_enter(&db->db_rwlock, RW_WRITER);
3458 	bcopy(data->b_data, db->db.db_data, arc_buf_size(data));
3459 	rw_exit(&db->db_rwlock);
3460 }
3461 
3462 /*
3463  * Returns with db_holds incremented, and db_mtx not held.
3464  * Note: dn_struct_rwlock must be held.
3465  */
3466 int
3467 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
3468     boolean_t fail_sparse, boolean_t fail_uncached,
3469     void *tag, dmu_buf_impl_t **dbp)
3470 {
3471 	dmu_buf_impl_t *db, *parent = NULL;
3472 
3473 	/* If the pool has been created, verify the tx_sync_lock is not held */
3474 	spa_t *spa = dn->dn_objset->os_spa;
3475 	dsl_pool_t *dp = spa->spa_dsl_pool;
3476 	if (dp != NULL) {
3477 		ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock));
3478 	}
3479 
3480 	ASSERT(blkid != DMU_BONUS_BLKID);
3481 	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3482 	ASSERT3U(dn->dn_nlevels, >, level);
3483 
3484 	*dbp = NULL;
3485 
3486 	/* dbuf_find() returns with db_mtx held */
3487 	db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid);
3488 
3489 	if (db == NULL) {
3490 		blkptr_t *bp = NULL;
3491 		int err;
3492 
3493 		if (fail_uncached)
3494 			return (SET_ERROR(ENOENT));
3495 
3496 		ASSERT3P(parent, ==, NULL);
3497 		err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
3498 		if (fail_sparse) {
3499 			if (err == 0 && bp && BP_IS_HOLE(bp))
3500 				err = SET_ERROR(ENOENT);
3501 			if (err) {
3502 				if (parent)
3503 					dbuf_rele(parent, NULL);
3504 				return (err);
3505 			}
3506 		}
3507 		if (err && err != ENOENT)
3508 			return (err);
3509 		db = dbuf_create(dn, level, blkid, parent, bp);
3510 	}
3511 
3512 	if (fail_uncached && db->db_state != DB_CACHED) {
3513 		mutex_exit(&db->db_mtx);
3514 		return (SET_ERROR(ENOENT));
3515 	}
3516 
3517 	if (db->db_buf != NULL) {
3518 		arc_buf_access(db->db_buf);
3519 		ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
3520 	}
3521 
3522 	ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
3523 
3524 	/*
3525 	 * If this buffer is currently syncing out, and we are
3526 	 * still referencing it from db_data, we need to make a copy
3527 	 * of it in case we decide we want to dirty it again in this txg.
3528 	 */
3529 	if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
3530 	    dn->dn_object != DMU_META_DNODE_OBJECT &&
3531 	    db->db_state == DB_CACHED && db->db_data_pending) {
3532 		dbuf_dirty_record_t *dr = db->db_data_pending;
3533 		if (dr->dt.dl.dr_data == db->db_buf)
3534 			dbuf_hold_copy(dn, db);
3535 	}
3536 
3537 	if (multilist_link_active(&db->db_cache_link)) {
3538 		ASSERT(zfs_refcount_is_zero(&db->db_holds));
3539 		ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
3540 		    db->db_caching_status == DB_DBUF_METADATA_CACHE);
3541 
3542 		multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
3543 		(void) zfs_refcount_remove_many(
3544 		    &dbuf_caches[db->db_caching_status].size,
3545 		    db->db.db_size, db);
3546 
3547 		if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
3548 			DBUF_STAT_BUMPDOWN(metadata_cache_count);
3549 		} else {
3550 			DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
3551 			DBUF_STAT_BUMPDOWN(cache_count);
3552 			DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
3553 			    db->db.db_size);
3554 		}
3555 		db->db_caching_status = DB_NO_CACHE;
3556 	}
3557 	(void) zfs_refcount_add(&db->db_holds, tag);
3558 	DBUF_VERIFY(db);
3559 	mutex_exit(&db->db_mtx);
3560 
3561 	/* NOTE: we can't rele the parent until after we drop the db_mtx */
3562 	if (parent)
3563 		dbuf_rele(parent, NULL);
3564 
3565 	ASSERT3P(DB_DNODE(db), ==, dn);
3566 	ASSERT3U(db->db_blkid, ==, blkid);
3567 	ASSERT3U(db->db_level, ==, level);
3568 	*dbp = db;
3569 
3570 	return (0);
3571 }
3572 
3573 dmu_buf_impl_t *
3574 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
3575 {
3576 	return (dbuf_hold_level(dn, 0, blkid, tag));
3577 }
3578 
3579 dmu_buf_impl_t *
3580 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
3581 {
3582 	dmu_buf_impl_t *db;
3583 	int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
3584 	return (err ? NULL : db);
3585 }
3586 
3587 void
3588 dbuf_create_bonus(dnode_t *dn)
3589 {
3590 	ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
3591 
3592 	ASSERT(dn->dn_bonus == NULL);
3593 	dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
3594 }
3595 
3596 int
3597 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
3598 {
3599 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3600 
3601 	if (db->db_blkid != DMU_SPILL_BLKID)
3602 		return (SET_ERROR(ENOTSUP));
3603 	if (blksz == 0)
3604 		blksz = SPA_MINBLOCKSIZE;
3605 	ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
3606 	blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
3607 
3608 	dbuf_new_size(db, blksz, tx);
3609 
3610 	return (0);
3611 }
3612 
3613 void
3614 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
3615 {
3616 	dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
3617 }
3618 
3619 #pragma weak dmu_buf_add_ref = dbuf_add_ref
3620 void
3621 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
3622 {
3623 	int64_t holds = zfs_refcount_add(&db->db_holds, tag);
3624 	VERIFY3S(holds, >, 1);
3625 }
3626 
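/*
 * Take a hold on an existing dbuf, but only if it is still the dbuf found in
 * the hash table for (os, obj, blkid) and it has at least one hold that is
 * not accounted for by a dirty record.  Returns B_TRUE if the hold was added.
 */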
3627 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
3628 boolean_t
3629 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
3630     void *tag)
3631 {
3632 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3633 	dmu_buf_impl_t *found_db;
3634 	boolean_t result = B_FALSE;
3635 
3636 	if (blkid == DMU_BONUS_BLKID)
3637 		found_db = dbuf_find_bonus(os, obj);
3638 	else
3639 		found_db = dbuf_find(os, obj, 0, blkid);
3640 
3641 	if (found_db != NULL) {
3642 		if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
3643 			(void) zfs_refcount_add(&db->db_holds, tag);
3644 			result = B_TRUE;
3645 		}
3646 		mutex_exit(&found_db->db_mtx);
3647 	}
3648 	return (result);
3649 }
3650 
3651 /*
3652  * If you call dbuf_rele() you had better not be referencing the dnode handle
3653  * unless you have some other direct or indirect hold on the dnode. (An indirect
3654  * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
3655  * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
3656  * dnode's parent dbuf evicting its dnode handles.
3657  */
3658 void
3659 dbuf_rele(dmu_buf_impl_t *db, void *tag)
3660 {
3661 	mutex_enter(&db->db_mtx);
3662 	dbuf_rele_and_unlock(db, tag, B_FALSE);
3663 }
3664 
3665 void
3666 dmu_buf_rele(dmu_buf_t *db, void *tag)
3667 {
3668 	dbuf_rele((dmu_buf_impl_t *)db, tag);
3669 }
3670 
3671 /*
3672  * dbuf_rele() for an already-locked dbuf.  This is necessary to allow
3673  * db_dirtycnt and db_holds to be updated atomically.  The 'evicting'
3674  * argument should be set if we are already in the dbuf-evicting code
3675  * path, in which case we don't want to recursively evict.  This allows us to
3676  * avoid deeply nested stacks that would have a call flow similar to this:
3677  *
3678  * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
3679  *	^						|
3680  *	|						|
3681  *	+-----dbuf_destroy()<--dbuf_evict_one()<--------+
3682  *
3683  */
3684 void
3685 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting)
3686 {
3687 	int64_t holds;
3688 	uint64_t size;
3689 
3690 	ASSERT(MUTEX_HELD(&db->db_mtx));
3691 	DBUF_VERIFY(db);
3692 
3693 	/*
3694 	 * Remove the reference to the dbuf before removing its hold on the
3695 	 * dnode so we can guarantee in dnode_move() that a referenced bonus
3696 	 * buffer has a corresponding dnode hold.
3697 	 */
3698 	holds = zfs_refcount_remove(&db->db_holds, tag);
3699 	ASSERT(holds >= 0);
3700 
3701 	/*
3702 	 * We can't freeze indirects if there is a possibility that they
3703 	 * may be modified in the current syncing context.
3704 	 */
3705 	if (db->db_buf != NULL &&
3706 	    holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
3707 		arc_buf_freeze(db->db_buf);
3708 	}
3709 
3710 	if (holds == db->db_dirtycnt &&
3711 	    db->db_level == 0 && db->db_user_immediate_evict)
3712 		dbuf_evict_user(db);
3713 
3714 	if (holds == 0) {
3715 		if (db->db_blkid == DMU_BONUS_BLKID) {
3716 			dnode_t *dn;
3717 			boolean_t evict_dbuf = db->db_pending_evict;
3718 
3719 			/*
3720 			 * If the dnode moves here, we cannot cross this
3721 			 * barrier until the move completes.
3722 			 */
3723 			DB_DNODE_ENTER(db);
3724 
3725 			dn = DB_DNODE(db);
3726 			atomic_dec_32(&dn->dn_dbufs_count);
3727 
3728 			/*
3729 			 * Decrementing the dbuf count means that the bonus
3730 			 * buffer's dnode hold is no longer discounted in
3731 			 * dnode_move(). The dnode cannot move until after
3732 			 * the dnode_rele() below.
3733 			 */
3734 			DB_DNODE_EXIT(db);
3735 
3736 			/*
3737 			 * Do not reference db after its lock is dropped.
3738 			 * Another thread may evict it.
3739 			 */
3740 			mutex_exit(&db->db_mtx);
3741 
3742 			if (evict_dbuf)
3743 				dnode_evict_bonus(dn);
3744 
3745 			dnode_rele(dn, db);
3746 		} else if (db->db_buf == NULL) {
3747 			/*
3748 			 * This is a special case: we never associated this
3749 			 * dbuf with any data allocated from the ARC.
3750 			 */
3751 			ASSERT(db->db_state == DB_UNCACHED ||
3752 			    db->db_state == DB_NOFILL);
3753 			dbuf_destroy(db);
3754 		} else if (arc_released(db->db_buf)) {
3755 			/*
3756 			 * This dbuf has anonymous data associated with it.
3757 			 */
3758 			dbuf_destroy(db);
3759 		} else {
3760 			boolean_t do_arc_evict = B_FALSE;
3761 			blkptr_t bp;
3762 			spa_t *spa = dmu_objset_spa(db->db_objset);
3763 
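			/*
			 * Remember the BP of a non-cacheable dbuf so that,
			 * after the dbuf has been destroyed below, arc_freed()
			 * can release the corresponding ARC buffer.
			 */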
3764 			if (!DBUF_IS_CACHEABLE(db) &&
3765 			    db->db_blkptr != NULL &&
3766 			    !BP_IS_HOLE(db->db_blkptr) &&
3767 			    !BP_IS_EMBEDDED(db->db_blkptr)) {
3768 				do_arc_evict = B_TRUE;
3769 				bp = *db->db_blkptr;
3770 			}
3771 
3772 			if (!DBUF_IS_CACHEABLE(db) ||
3773 			    db->db_pending_evict) {
3774 				dbuf_destroy(db);
3775 			} else if (!multilist_link_active(&db->db_cache_link)) {
3776 				ASSERT3U(db->db_caching_status, ==,
3777 				    DB_NO_CACHE);
3778 
3779 				dbuf_cached_state_t dcs =
3780 				    dbuf_include_in_metadata_cache(db) ?
3781 				    DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
3782 				db->db_caching_status = dcs;
3783 
3784 				multilist_insert(&dbuf_caches[dcs].cache, db);
3785 				uint64_t db_size = db->db.db_size;
3786 				size = zfs_refcount_add_many(
3787 				    &dbuf_caches[dcs].size, db_size, db);
3788 				uint8_t db_level = db->db_level;
3789 				mutex_exit(&db->db_mtx);
3790 
3791 				if (dcs == DB_DBUF_METADATA_CACHE) {
3792 					DBUF_STAT_BUMP(metadata_cache_count);
3793 					DBUF_STAT_MAX(
3794 					    metadata_cache_size_bytes_max,
3795 					    size);
3796 				} else {
3797 					DBUF_STAT_BUMP(cache_count);
3798 					DBUF_STAT_MAX(cache_size_bytes_max,
3799 					    size);
3800 					DBUF_STAT_BUMP(cache_levels[db_level]);
3801 					DBUF_STAT_INCR(
3802 					    cache_levels_bytes[db_level],
3803 					    db_size);
3804 				}
3805 
3806 				if (dcs == DB_DBUF_CACHE && !evicting)
3807 					dbuf_evict_notify(size);
3808 			}
3809 
3810 			if (do_arc_evict)
3811 				arc_freed(spa, &bp);
3812 		}
3813 	} else {
3814 		mutex_exit(&db->db_mtx);
3815 	}
3817 }
3818 
3819 #pragma weak dmu_buf_refcount = dbuf_refcount
3820 uint64_t
3821 dbuf_refcount(dmu_buf_impl_t *db)
3822 {
3823 	return (zfs_refcount_count(&db->db_holds));
3824 }
3825 
3826 uint64_t
3827 dmu_buf_user_refcount(dmu_buf_t *db_fake)
3828 {
3829 	uint64_t holds;
3830 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3831 
3832 	mutex_enter(&db->db_mtx);
3833 	ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
3834 	holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
3835 	mutex_exit(&db->db_mtx);
3836 
3837 	return (holds);
3838 }
3839 
3840 void *
3841 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
3842     dmu_buf_user_t *new_user)
3843 {
3844 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3845 
3846 	mutex_enter(&db->db_mtx);
3847 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
3848 	if (db->db_user == old_user)
3849 		db->db_user = new_user;
3850 	else
3851 		old_user = db->db_user;
3852 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
3853 	mutex_exit(&db->db_mtx);
3854 
3855 	return (old_user);
3856 }
3857 
3858 void *
3859 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3860 {
3861 	return (dmu_buf_replace_user(db_fake, NULL, user));
3862 }
3863 
3864 void *
3865 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3866 {
3867 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3868 
3869 	db->db_user_immediate_evict = TRUE;
3870 	return (dmu_buf_set_user(db_fake, user));
3871 }
3872 
3873 void *
3874 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3875 {
3876 	return (dmu_buf_replace_user(db_fake, user, NULL));
3877 }
3878 
3879 void *
3880 dmu_buf_get_user(dmu_buf_t *db_fake)
3881 {
3882 	dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3883 
3884 	dbuf_verify_user(db, DBVU_NOT_EVICTING);
3885 	return (db->db_user);
3886 }
3887 
3888 void
3889 dmu_buf_user_evict_wait()
3890 {
3891 	taskq_wait(dbu_evict_taskq);
3892 }
3893 
3894 blkptr_t *
3895 dmu_buf_get_blkptr(dmu_buf_t *db)
3896 {
3897 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3898 	return (dbi->db_blkptr);
3899 }
3900 
3901 objset_t *
3902 dmu_buf_get_objset(dmu_buf_t *db)
3903 {
3904 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3905 	return (dbi->db_objset);
3906 }
3907 
3908 dnode_t *
3909 dmu_buf_dnode_enter(dmu_buf_t *db)
3910 {
3911 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3912 	DB_DNODE_ENTER(dbi);
3913 	return (DB_DNODE(dbi));
3914 }
3915 
3916 void
3917 dmu_buf_dnode_exit(dmu_buf_t *db)
3918 {
3919 	dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3920 	DB_DNODE_EXIT(dbi);
3921 }
3922 
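/*
 * If this dbuf's block pointer has not been hooked up yet, point it at the
 * appropriate slot in its parent: either the dnode's embedded blkptr array
 * (for top-level blocks) or the parent indirect block's data.
 */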
3923 static void
3924 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
3925 {
3926 	/* ASSERT(dmu_tx_is_syncing(tx)) */
3927 	ASSERT(MUTEX_HELD(&db->db_mtx));
3928 
3929 	if (db->db_blkptr != NULL)
3930 		return;
3931 
3932 	if (db->db_blkid == DMU_SPILL_BLKID) {
3933 		db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
3934 		BP_ZERO(db->db_blkptr);
3935 		return;
3936 	}
3937 	if (db->db_level == dn->dn_phys->dn_nlevels-1) {
3938 		/*
3939 		 * This buffer was allocated at a time when there were
3940 		 * no available blkptrs from the dnode, or it was
3941 		 * inappropriate to hook it in (i.e., nlevels mismatch).
3942 		 */
3943 		ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
3944 		ASSERT(db->db_parent == NULL);
3945 		db->db_parent = dn->dn_dbuf;
3946 		db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
3947 		DBUF_VERIFY(db);
3948 	} else {
3949 		dmu_buf_impl_t *parent = db->db_parent;
3950 		int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3951 
3952 		ASSERT(dn->dn_phys->dn_nlevels > 1);
3953 		if (parent == NULL) {
3954 			mutex_exit(&db->db_mtx);
3955 			rw_enter(&dn->dn_struct_rwlock, RW_READER);
3956 			parent = dbuf_hold_level(dn, db->db_level + 1,
3957 			    db->db_blkid >> epbs, db);
3958 			rw_exit(&dn->dn_struct_rwlock);
3959 			mutex_enter(&db->db_mtx);
3960 			db->db_parent = parent;
3961 		}
3962 		db->db_blkptr = (blkptr_t *)parent->db.db_data +
3963 		    (db->db_blkid & ((1ULL << epbs) - 1));
3964 		DBUF_VERIFY(db);
3965 	}
3966 }
3967 
3968 static void
3969 dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
3970 {
3971 	dmu_buf_impl_t *db = dr->dr_dbuf;
3972 	void *data = dr->dt.dl.dr_data;
3973 
3974 	ASSERT0(db->db_level);
3975 	ASSERT(MUTEX_HELD(&db->db_mtx));
3976 	ASSERT(db->db_blkid == DMU_BONUS_BLKID);
3977 	ASSERT(data != NULL);
3978 
3979 	dnode_t *dn = dr->dr_dnode;
3980 	ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
3981 	    DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
3982 	bcopy(data, DN_BONUS(dn->dn_phys), DN_MAX_BONUS_LEN(dn->dn_phys));
3983 
3984 	dbuf_sync_leaf_verify_bonus_dnode(dr);
3985 
3986 	dbuf_undirty_bonus(dr);
3987 	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
3988 }
3989 
3990 /*
3991  * When syncing out a block of dnodes, adjust the block to deal with
3992  * encryption.  Normally, we make sure the block is decrypted before writing
3993  * it.  If we have crypt params, then we are writing a raw (encrypted) block,
3994  * from a raw receive.  In this case, set the ARC buf's crypt params so
3995  * that the BP will be filled with the correct byteorder, salt, iv, and mac.
3996  */
3997 static void
3998 dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
3999 {
4000 	int err;
4001 	dmu_buf_impl_t *db = dr->dr_dbuf;
4002 
4003 	ASSERT(MUTEX_HELD(&db->db_mtx));
4004 	ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
4005 	ASSERT3U(db->db_level, ==, 0);
4006 
4007 	if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
4008 		zbookmark_phys_t zb;
4009 
4010 		/*
4011 		 * Unfortunately, there is currently no mechanism for
4012 		 * syncing context to handle decryption errors. An error
4013 		 * here is only possible if an attacker maliciously
4014 		 * changed a dnode block and updated the associated
4015 		 * checksums going up the block tree.
4016 		 */
4017 		SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
4018 		    db->db.db_object, db->db_level, db->db_blkid);
4019 		err = arc_untransform(db->db_buf, db->db_objset->os_spa,
4020 		    &zb, B_TRUE);
4021 		if (err)
4022 			panic("Invalid dnode block MAC");
4023 	} else if (dr->dt.dl.dr_has_raw_params) {
4024 		(void) arc_release(dr->dt.dl.dr_data, db);
4025 		arc_convert_to_raw(dr->dt.dl.dr_data,
4026 		    dmu_objset_id(db->db_objset),
4027 		    dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
4028 		    dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
4029 	}
4030 }
4031 
4032 /*
4033  * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
4034  * is critical that we not allow the compiler to inline this function into
4035  * dbuf_sync_list(), thereby drastically bloating the stack usage.
4036  */
4037 noinline static void
4038 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4039 {
4040 	dmu_buf_impl_t *db = dr->dr_dbuf;
4041 	dnode_t *dn = dr->dr_dnode;
4042 
4043 	ASSERT(dmu_tx_is_syncing(tx));
4044 
4045 	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4046 
4047 	mutex_enter(&db->db_mtx);
4048 
4049 	ASSERT(db->db_level > 0);
4050 	DBUF_VERIFY(db);
4051 
4052 	/* Read the block if it hasn't been read yet. */
4053 	if (db->db_buf == NULL) {
4054 		mutex_exit(&db->db_mtx);
4055 		(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
4056 		mutex_enter(&db->db_mtx);
4057 	}
4058 	ASSERT3U(db->db_state, ==, DB_CACHED);
4059 	ASSERT(db->db_buf != NULL);
4060 
4061 	/* Indirect block size must match what the dnode thinks it is. */
4062 	ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4063 	dbuf_check_blkptr(dn, db);
4064 
4065 	/* Provide the pending dirty record to child dbufs */
4066 	db->db_data_pending = dr;
4067 
4068 	mutex_exit(&db->db_mtx);
4069 
4070 	dbuf_write(dr, db->db_buf, tx);
4071 
4072 	zio_t *zio = dr->dr_zio;
4073 	mutex_enter(&dr->dt.di.dr_mtx);
4074 	dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
4075 	ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4076 	mutex_exit(&dr->dt.di.dr_mtx);
4077 	zio_nowait(zio);
4078 }
4079 
4080 /*
4081  * Verify that the size of the data in our bonus buffer does not exceed
4082  * its recorded size.
4083  *
4084  * The purpose of this verification is to catch any cases in development
4085  * where the size of a phys structure (e.g., space_map_phys_t) grows and,
4086  * due to incorrect feature management, older pools expect to read more
4087  * data even though they didn't actually write it to begin with.
4088  *
4089  * For example, this would catch an error in the feature logic where we
4090  * open an older pool and expect to write the space map histogram of
4091  * a space map with size SPACE_MAP_SIZE_V0.
4092  */
4093 static void
4094 dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
4095 {
4096 #ifdef ZFS_DEBUG
4097 	dnode_t *dn = dr->dr_dnode;
4098 
4099 	/*
4100 	 * Encrypted bonus buffers can have data past their bonuslen.
4101 	 * Skip the verification of these blocks.
4102 	 */
4103 	if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))
4104 		return;
4105 
4106 	uint16_t bonuslen = dn->dn_phys->dn_bonuslen;
4107 	uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
4108 	ASSERT3U(bonuslen, <=, maxbonuslen);
4109 
4110 	arc_buf_t *datap = dr->dt.dl.dr_data;
4111 	char *datap_end = ((char *)datap) + bonuslen;
4112 	char *datap_max = ((char *)datap) + maxbonuslen;
4113 
4114 	/* ensure that everything is zero after our data */
4115 	for (; datap_end < datap_max; datap_end++)
4116 		ASSERT(*datap_end == 0);
4117 #endif
4118 }
4119 
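/*
 * Return the location of the on-disk block pointer for a lightweight
 * (dbuf-less) dirty record: either a slot in the dnode's blkptr array or a
 * slot in the parent (level-1) indirect block.
 */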
4120 static blkptr_t *
4121 dbuf_lightweight_bp(dbuf_dirty_record_t *dr)
4122 {
4123 	/* This must be a lightweight dirty record. */
4124 	ASSERT3P(dr->dr_dbuf, ==, NULL);
4125 	dnode_t *dn = dr->dr_dnode;
4126 
4127 	if (dn->dn_phys->dn_nlevels == 1) {
4128 		VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr);
4129 		return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]);
4130 	} else {
4131 		dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf;
4132 		int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
4133 		VERIFY3U(parent_db->db_level, ==, 1);
4134 		VERIFY3P(parent_db->db_dnode_handle->dnh_dnode, ==, dn);
4135 		VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid);
4136 		blkptr_t *bp = parent_db->db.db_data;
4137 		return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]);
4138 	}
4139 }
4140 
4141 static void
4142 dbuf_lightweight_ready(zio_t *zio)
4143 {
4144 	dbuf_dirty_record_t *dr = zio->io_private;
4145 	blkptr_t *bp = zio->io_bp;
4146 
4147 	if (zio->io_error != 0)
4148 		return;
4149 
4150 	dnode_t *dn = dr->dr_dnode;
4151 
4152 	blkptr_t *bp_orig = dbuf_lightweight_bp(dr);
4153 	spa_t *spa = dmu_objset_spa(dn->dn_objset);
4154 	int64_t delta = bp_get_dsize_sync(spa, bp) -
4155 	    bp_get_dsize_sync(spa, bp_orig);
4156 	dnode_diduse_space(dn, delta);
4157 
4158 	uint64_t blkid = dr->dt.dll.dr_blkid;
4159 	mutex_enter(&dn->dn_mtx);
4160 	if (blkid > dn->dn_phys->dn_maxblkid) {
4161 		ASSERT0(dn->dn_objset->os_raw_receive);
4162 		dn->dn_phys->dn_maxblkid = blkid;
4163 	}
4164 	mutex_exit(&dn->dn_mtx);
4165 
4166 	if (!BP_IS_EMBEDDED(bp)) {
4167 		uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1;
4168 		BP_SET_FILL(bp, fill);
4169 	}
4170 
4171 	dmu_buf_impl_t *parent_db;
4172 	EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1);
4173 	if (dr->dr_parent == NULL) {
4174 		parent_db = dn->dn_dbuf;
4175 	} else {
4176 		parent_db = dr->dr_parent->dr_dbuf;
4177 	}
4178 	rw_enter(&parent_db->db_rwlock, RW_WRITER);
4179 	*bp_orig = *bp;
4180 	rw_exit(&parent_db->db_rwlock);
4181 }
4182 
4183 static void
4184 dbuf_lightweight_physdone(zio_t *zio)
4185 {
4186 	dbuf_dirty_record_t *dr = zio->io_private;
4187 	dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
4188 	ASSERT3U(dr->dr_txg, ==, zio->io_txg);
4189 
4190 	/*
4191 	 * The callback will be called io_phys_children times.  Retire one
4192 	 * portion of our dirty space each time we are called.  Any rounding
4193 	 * error will be cleaned up by dbuf_lightweight_done().
4194 	 */
4195 	int delta = dr->dr_accounted / zio->io_phys_children;
4196 	dsl_pool_undirty_space(dp, delta, zio->io_txg);
4197 }
4198 
4199 static void
4200 dbuf_lightweight_done(zio_t *zio)
4201 {
4202 	dbuf_dirty_record_t *dr = zio->io_private;
4203 
4204 	VERIFY0(zio->io_error);
4205 
4206 	objset_t *os = dr->dr_dnode->dn_objset;
4207 	dmu_tx_t *tx = os->os_synctx;
4208 
4209 	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4210 		ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
4211 	} else {
4212 		dsl_dataset_t *ds = os->os_dsl_dataset;
4213 		(void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE);
4214 		dsl_dataset_block_born(ds, zio->io_bp, tx);
4215 	}
4216 
4217 	/*
4218 	 * See comment in dbuf_write_done().
4219 	 */
4220 	if (zio->io_phys_children == 0) {
4221 		dsl_pool_undirty_space(dmu_objset_pool(os),
4222 		    dr->dr_accounted, zio->io_txg);
4223 	} else {
4224 		dsl_pool_undirty_space(dmu_objset_pool(os),
4225 		    dr->dr_accounted % zio->io_phys_children, zio->io_txg);
4226 	}
4227 
4228 	abd_free(dr->dt.dll.dr_abd);
4229 	kmem_free(dr, sizeof (*dr));
4230 }
4231 
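/*
 * Write out a lightweight (dbuf-less) dirty record.  The data lives in an ABD
 * attached to the dirty record rather than in a dbuf; the write is issued
 * against a copy of the on-disk BP, which is copied back into place in
 * dbuf_lightweight_ready().
 */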
4232 noinline static void
4233 dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4234 {
4235 	dnode_t *dn = dr->dr_dnode;
4236 	zio_t *pio;
4237 	if (dn->dn_phys->dn_nlevels == 1) {
4238 		pio = dn->dn_zio;
4239 	} else {
4240 		pio = dr->dr_parent->dr_zio;
4241 	}
4242 
4243 	zbookmark_phys_t zb = {
4244 		.zb_objset = dmu_objset_id(dn->dn_objset),
4245 		.zb_object = dn->dn_object,
4246 		.zb_level = 0,
4247 		.zb_blkid = dr->dt.dll.dr_blkid,
4248 	};
4249 
4250 	/*
4251 	 * See comment in dbuf_write().  This is so that zio->io_bp_orig
4252 	 * will have the old BP in dbuf_lightweight_done().
4253 	 */
4254 	dr->dr_bp_copy = *dbuf_lightweight_bp(dr);
4255 
4256 	dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset),
4257 	    dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd,
4258 	    dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd),
4259 	    &dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL,
4260 	    dbuf_lightweight_physdone, dbuf_lightweight_done, dr,
4261 	    ZIO_PRIORITY_ASYNC_WRITE,
4262 	    ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb);
4263 
4264 	zio_nowait(dr->dr_zio);
4265 }
4266 
4267 /*
4268  * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
4269  * critical that we not allow the compiler to inline this function into
4270  * dbuf_sync_list(), thereby drastically bloating the stack usage.
4271  */
4272 noinline static void
4273 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4274 {
4275 	arc_buf_t **datap = &dr->dt.dl.dr_data;
4276 	dmu_buf_impl_t *db = dr->dr_dbuf;
4277 	dnode_t *dn = dr->dr_dnode;
4278 	objset_t *os;
4279 	uint64_t txg = tx->tx_txg;
4280 
4281 	ASSERT(dmu_tx_is_syncing(tx));
4282 
4283 	dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4284 
4285 	mutex_enter(&db->db_mtx);
4286 	/*
4287 	 * To be synced, we must be dirtied.  But we
4288 	 * might have been freed after being dirtied.
4289 	 */
4290 	if (db->db_state == DB_UNCACHED) {
4291 		/* This buffer has been freed since it was dirtied */
4292 		ASSERT(db->db.db_data == NULL);
4293 	} else if (db->db_state == DB_FILL) {
4294 		/* This buffer was freed and is now being re-filled */
4295 		ASSERT(db->db.db_data != dr->dt.dl.dr_data);
4296 	} else {
4297 		ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
4298 	}
4299 	DBUF_VERIFY(db);
4300 
4301 	if (db->db_blkid == DMU_SPILL_BLKID) {
4302 		mutex_enter(&dn->dn_mtx);
4303 		if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
4304 			/*
4305 			 * In the previous transaction group, the bonus buffer
4306 			 * was entirely used to store the attributes for the
4307 			 * dnode which overrode the dn_spill field.  However,
4308 			 * when adding more attributes to the file a spill
4309 			 * block was required to hold the extra attributes.
4310 			 *
4311 			 * Make sure to clear the garbage left in the dn_spill
4312 			 * field from the previous attributes in the bonus
4313 			 * buffer.  Otherwise, after writing out the spill
4314 			 * block to the newly allocated dva, it will free
4315 			 * the old block pointed to by the invalid dn_spill.
4316 			 */
4317 			db->db_blkptr = NULL;
4318 		}
4319 		dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
4320 		mutex_exit(&dn->dn_mtx);
4321 	}
4322 
4323 	/*
4324 	 * If this is a bonus buffer, simply copy the bonus data into the
4325 	 * dnode.  It will be written out when the dnode is synced (and it
4326 	 * will be synced, since it must have been dirty for dbuf_sync to
4327 	 * be called).
4328 	 */
4329 	if (db->db_blkid == DMU_BONUS_BLKID) {
4330 		ASSERT(dr->dr_dbuf == db);
4331 		dbuf_sync_bonus(dr, tx);
4332 		return;
4333 	}
4334 
4335 	os = dn->dn_objset;
4336 
4337 	/*
4338 	 * This function may have dropped the db_mtx lock allowing a dmu_sync
4339 	 * operation to sneak in. As a result, we need to ensure that we
4340 	 * don't check the dr_override_state until we have returned from
4341 	 * dbuf_check_blkptr.
4342 	 */
4343 	dbuf_check_blkptr(dn, db);
4344 
4345 	/*
4346 	 * If this buffer is in the middle of an immediate write,
4347 	 * wait for the synchronous IO to complete.
4348 	 */
4349 	while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
4350 		ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
4351 		cv_wait(&db->db_changed, &db->db_mtx);
4352 		ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
4353 	}
4354 
4355 	/*
4356 	 * If this is a dnode block, ensure it is appropriately encrypted
4357 	 * or decrypted, depending on what we are writing to it this txg.
4358 	 */
4359 	if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
4360 		dbuf_prepare_encrypted_dnode_leaf(dr);
4361 
4362 	if (db->db_state != DB_NOFILL &&
4363 	    dn->dn_object != DMU_META_DNODE_OBJECT &&
4364 	    zfs_refcount_count(&db->db_holds) > 1 &&
4365 	    dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
4366 	    *datap == db->db_buf) {
4367 		/*
4368 		 * If this buffer is currently "in use" (i.e., there
4369 		 * are active holds and db_data still references it),
4370 		 * then make a copy before we start the write so that
4371 		 * any modifications from the open txg will not leak
4372 		 * into this write.
4373 		 *
4374 		 * NOTE: this copy does not need to be made for
4375 		 * objects only modified in the syncing context (e.g.
4376 		 * meta-dnode blocks).
4377 		 */
4378 		int psize = arc_buf_size(*datap);
4379 		int lsize = arc_buf_lsize(*datap);
4380 		arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
4381 		enum zio_compress compress_type = arc_get_compression(*datap);
4382 		uint8_t complevel = arc_get_complevel(*datap);
4383 
4384 		if (arc_is_encrypted(*datap)) {
4385 			boolean_t byteorder;
4386 			uint8_t salt[ZIO_DATA_SALT_LEN];
4387 			uint8_t iv[ZIO_DATA_IV_LEN];
4388 			uint8_t mac[ZIO_DATA_MAC_LEN];
4389 
4390 			arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
4391 			*datap = arc_alloc_raw_buf(os->os_spa, db,
4392 			    dmu_objset_id(os), byteorder, salt, iv, mac,
4393 			    dn->dn_type, psize, lsize, compress_type,
4394 			    complevel);
4395 		} else if (compress_type != ZIO_COMPRESS_OFF) {
4396 			ASSERT3U(type, ==, ARC_BUFC_DATA);
4397 			*datap = arc_alloc_compressed_buf(os->os_spa, db,
4398 			    psize, lsize, compress_type, complevel);
4399 		} else {
4400 			*datap = arc_alloc_buf(os->os_spa, db, type, psize);
4401 		}
4402 		bcopy(db->db.db_data, (*datap)->b_data, psize);
4403 	}
4404 	db->db_data_pending = dr;
4405 
4406 	mutex_exit(&db->db_mtx);
4407 
4408 	dbuf_write(dr, *datap, tx);
4409 
4410 	ASSERT(!list_link_active(&dr->dr_dirty_node));
4411 	if (dn->dn_object == DMU_META_DNODE_OBJECT) {
4412 		list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
4413 	} else {
4414 		zio_nowait(dr->dr_zio);
4415 	}
4416 }
4417 
4418 void
4419 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
4420 {
4421 	dbuf_dirty_record_t *dr;
4422 
4423 	while ((dr = list_head(list))) {
4424 		if (dr->dr_zio != NULL) {
4425 			/*
4426 			 * If we find an already initialized zio then we
4427 			 * are processing the meta-dnode, and we have finished.
4428 			 * The dbufs for all dnodes are put back on the list
4429 			 * during processing, so that we can zio_wait()
4430 			 * these IOs after initiating all child IOs.
4431 			 */
4432 			ASSERT3U(dr->dr_dbuf->db.db_object, ==,
4433 			    DMU_META_DNODE_OBJECT);
4434 			break;
4435 		}
4436 		list_remove(list, dr);
4437 		if (dr->dr_dbuf == NULL) {
4438 			dbuf_sync_lightweight(dr, tx);
4439 		} else {
4440 			if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
4441 			    dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
4442 				VERIFY3U(dr->dr_dbuf->db_level, ==, level);
4443 			}
4444 			if (dr->dr_dbuf->db_level > 0)
4445 				dbuf_sync_indirect(dr, tx);
4446 			else
4447 				dbuf_sync_leaf(dr, tx);
4448 		}
4449 	}
4450 }
4451 
4452 static void
4453 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4454 {
4455 	(void) buf;
4456 	dmu_buf_impl_t *db = vdb;
4457 	dnode_t *dn;
4458 	blkptr_t *bp = zio->io_bp;
4459 	blkptr_t *bp_orig = &zio->io_bp_orig;
4460 	spa_t *spa = zio->io_spa;
4461 	int64_t delta;
4462 	uint64_t fill = 0;
4463 	int i;
4464 
4465 	ASSERT3P(db->db_blkptr, !=, NULL);
4466 	ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
4467 
4468 	DB_DNODE_ENTER(db);
4469 	dn = DB_DNODE(db);
4470 	delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
4471 	dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
4472 	zio->io_prev_space_delta = delta;
4473 
4474 	if (bp->blk_birth != 0) {
4475 		ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
4476 		    BP_GET_TYPE(bp) == dn->dn_type) ||
4477 		    (db->db_blkid == DMU_SPILL_BLKID &&
4478 		    BP_GET_TYPE(bp) == dn->dn_bonustype) ||
4479 		    BP_IS_EMBEDDED(bp));
4480 		ASSERT(BP_GET_LEVEL(bp) == db->db_level);
4481 	}
4482 
4483 	mutex_enter(&db->db_mtx);
4484 
4485 #ifdef ZFS_DEBUG
4486 	if (db->db_blkid == DMU_SPILL_BLKID) {
4487 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4488 		ASSERT(!(BP_IS_HOLE(bp)) &&
4489 		    db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4490 	}
4491 #endif
4492 
4493 	if (db->db_level == 0) {
4494 		mutex_enter(&dn->dn_mtx);
4495 		if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
4496 		    db->db_blkid != DMU_SPILL_BLKID) {
4497 			ASSERT0(db->db_objset->os_raw_receive);
4498 			dn->dn_phys->dn_maxblkid = db->db_blkid;
4499 		}
4500 		mutex_exit(&dn->dn_mtx);
4501 
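		/*
		 * For a block of dnodes, the fill count is the number of
		 * allocated dnodes it contains; skip the extra slots used by
		 * large dnodes.  For other level-0 blocks the fill count is
		 * simply 0 (hole) or 1.
		 */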
4502 		if (dn->dn_type == DMU_OT_DNODE) {
4503 			i = 0;
4504 			while (i < db->db.db_size) {
4505 				dnode_phys_t *dnp =
4506 				    (void *)(((char *)db->db.db_data) + i);
4507 
4508 				i += DNODE_MIN_SIZE;
4509 				if (dnp->dn_type != DMU_OT_NONE) {
4510 					fill++;
4511 					i += dnp->dn_extra_slots *
4512 					    DNODE_MIN_SIZE;
4513 				}
4514 			}
4515 		} else {
4516 			if (BP_IS_HOLE(bp)) {
4517 				fill = 0;
4518 			} else {
4519 				fill = 1;
4520 			}
4521 		}
4522 	} else {
4523 		blkptr_t *ibp = db->db.db_data;
4524 		ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4525 		for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
4526 			if (BP_IS_HOLE(ibp))
4527 				continue;
4528 			fill += BP_GET_FILL(ibp);
4529 		}
4530 	}
4531 	DB_DNODE_EXIT(db);
4532 
4533 	if (!BP_IS_EMBEDDED(bp))
4534 		BP_SET_FILL(bp, fill);
4535 
4536 	mutex_exit(&db->db_mtx);
4537 
4538 	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
4539 	*db->db_blkptr = *bp;
4540 	dmu_buf_unlock_parent(db, dblt, FTAG);
4541 }
4542 
4543 /*
4544  * This function gets called just prior to running through the compression
4545  * stage of the zio pipeline. If we're an indirect block comprised of only
4546  * holes, then we want this indirect to be compressed away to a hole. In
4547  * order to do that we must zero out any information about the holes that
4548  * this indirect points to before we try to compress it.
4549  */
4550 static void
4551 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4552 {
4553 	(void) zio, (void) buf;
4554 	dmu_buf_impl_t *db = vdb;
4555 	dnode_t *dn;
4556 	blkptr_t *bp;
4557 	unsigned int epbs, i;
4558 
4559 	ASSERT3U(db->db_level, >, 0);
4560 	DB_DNODE_ENTER(db);
4561 	dn = DB_DNODE(db);
4562 	epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4563 	ASSERT3U(epbs, <, 31);
4564 
4565 	/* Determine if all our children are holes */
4566 	for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
4567 		if (!BP_IS_HOLE(bp))
4568 			break;
4569 	}
4570 
4571 	/*
4572 	 * If all the children are holes, then zero them all out so that
4573 	 * we may get compressed away.
4574 	 */
4575 	if (i == 1ULL << epbs) {
4576 		/*
4577 		 * We only found holes. Grab the rwlock to prevent
4578 		 * anybody from reading the blocks we're about to
4579 		 * zero out.
4580 		 */
4581 		rw_enter(&db->db_rwlock, RW_WRITER);
4582 		bzero(db->db.db_data, db->db.db_size);
4583 		rw_exit(&db->db_rwlock);
4584 	}
4585 	DB_DNODE_EXIT(db);
4586 }
4587 
4588 /*
4589  * The SPA will call this callback several times for each zio - once
4590  * for every physical child i/o (zio->io_phys_children times).  This
4591  * allows the DMU to monitor the progress of each logical i/o.  For example,
4592  * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
4593  * block.  There may be a long delay before all copies/fragments are completed,
4594  * so this callback allows us to retire dirty space gradually, as the physical
4595  * i/os complete.
4596  */
4597 static void
4598 dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
4599 {
4600 	(void) buf;
4601 	dmu_buf_impl_t *db = arg;
4602 	objset_t *os = db->db_objset;
4603 	dsl_pool_t *dp = dmu_objset_pool(os);
4604 	dbuf_dirty_record_t *dr;
4605 	int delta = 0;
4606 
4607 	dr = db->db_data_pending;
4608 	ASSERT3U(dr->dr_txg, ==, zio->io_txg);
4609 
4610 	/*
4611 	 * The callback will be called io_phys_children times.  Retire one
4612 	 * portion of our dirty space each time we are called.  Any rounding
4613 	 * error will be cleaned up by dbuf_write_done().
4614 	 */
4615 	delta = dr->dr_accounted / zio->io_phys_children;
4616 	dsl_pool_undirty_space(dp, delta, zio->io_txg);
4617 }
4618 
4619 static void
4620 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
4621 {
4622 	(void) buf;
4623 	dmu_buf_impl_t *db = vdb;
4624 	blkptr_t *bp_orig = &zio->io_bp_orig;
4625 	blkptr_t *bp = db->db_blkptr;
4626 	objset_t *os = db->db_objset;
4627 	dmu_tx_t *tx = os->os_synctx;
4628 
4629 	ASSERT0(zio->io_error);
4630 	ASSERT(db->db_blkptr == bp);
4631 
4632 	/*
4633 	 * For nopwrites and rewrites we ensure that the bp matches our
4634 	 * original and bypass all the accounting.
4635 	 */
4636 	if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4637 		ASSERT(BP_EQUAL(bp, bp_orig));
4638 	} else {
4639 		dsl_dataset_t *ds = os->os_dsl_dataset;
4640 		(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
4641 		dsl_dataset_block_born(ds, bp, tx);
4642 	}
4643 
4644 	mutex_enter(&db->db_mtx);
4645 
4646 	DBUF_VERIFY(db);
4647 
4648 	dbuf_dirty_record_t *dr = db->db_data_pending;
4649 	dnode_t *dn = dr->dr_dnode;
4650 	ASSERT(!list_link_active(&dr->dr_dirty_node));
4651 	ASSERT(dr->dr_dbuf == db);
4652 	ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
4653 	list_remove(&db->db_dirty_records, dr);
4654 
4655 #ifdef ZFS_DEBUG
4656 	if (db->db_blkid == DMU_SPILL_BLKID) {
4657 		ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4658 		ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
4659 		    db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4660 	}
4661 #endif
4662 
4663 	if (db->db_level == 0) {
4664 		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
4665 		ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
4666 		if (db->db_state != DB_NOFILL) {
4667 			if (dr->dt.dl.dr_data != db->db_buf)
4668 				arc_buf_destroy(dr->dt.dl.dr_data, db);
4669 		}
4670 	} else {
4671 		ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4672 		ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
4673 		if (!BP_IS_HOLE(db->db_blkptr)) {
4674 			int epbs __maybe_unused = dn->dn_phys->dn_indblkshift -
4675 			    SPA_BLKPTRSHIFT;
4676 			ASSERT3U(db->db_blkid, <=,
4677 			    dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
4678 			ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
4679 			    db->db.db_size);
4680 		}
4681 		mutex_destroy(&dr->dt.di.dr_mtx);
4682 		list_destroy(&dr->dt.di.dr_children);
4683 	}
4684 
4685 	cv_broadcast(&db->db_changed);
4686 	ASSERT(db->db_dirtycnt > 0);
4687 	db->db_dirtycnt -= 1;
4688 	db->db_data_pending = NULL;
4689 	dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4690 
4691 	/*
4692 	 * If we didn't do a physical write in this ZIO and we
4693 	 * still ended up here, it means that the space of the
4694 	 * dbuf that we just released (and undirtied) above hasn't
4695 	 * been marked as undirtied in the pool's accounting.
4696 	 *
4697 	 * Thus, we undirty that space in the pool's view of the
4698 	 * world here. For physical writes this type of update
4699 	 * happens in dbuf_write_physdone().
4700 	 *
4701 	 * If we did a physical write, cleanup any rounding errors
4702 	 * that came up due to writing multiple copies of a block
4703 	 * on disk [see dbuf_write_physdone()].
4704 	 */
4705 	if (zio->io_phys_children == 0) {
4706 		dsl_pool_undirty_space(dmu_objset_pool(os),
4707 		    dr->dr_accounted, zio->io_txg);
4708 	} else {
4709 		dsl_pool_undirty_space(dmu_objset_pool(os),
4710 		    dr->dr_accounted % zio->io_phys_children, zio->io_txg);
4711 	}
4712 
4713 	kmem_free(dr, sizeof (dbuf_dirty_record_t));
4714 }
4715 
4716 static void
4717 dbuf_write_nofill_ready(zio_t *zio)
4718 {
4719 	dbuf_write_ready(zio, NULL, zio->io_private);
4720 }
4721 
4722 static void
4723 dbuf_write_nofill_done(zio_t *zio)
4724 {
4725 	dbuf_write_done(zio, NULL, zio->io_private);
4726 }
4727 
4728 static void
4729 dbuf_write_override_ready(zio_t *zio)
4730 {
4731 	dbuf_dirty_record_t *dr = zio->io_private;
4732 	dmu_buf_impl_t *db = dr->dr_dbuf;
4733 
4734 	dbuf_write_ready(zio, NULL, db);
4735 }
4736 
4737 static void
4738 dbuf_write_override_done(zio_t *zio)
4739 {
4740 	dbuf_dirty_record_t *dr = zio->io_private;
4741 	dmu_buf_impl_t *db = dr->dr_dbuf;
4742 	blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
4743 
4744 	mutex_enter(&db->db_mtx);
4745 	if (!BP_EQUAL(zio->io_bp, obp)) {
4746 		if (!BP_IS_HOLE(obp))
4747 			dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
4748 		arc_release(dr->dt.dl.dr_data, db);
4749 	}
4750 	mutex_exit(&db->db_mtx);
4751 
4752 	dbuf_write_done(zio, NULL, db);
4753 
4754 	if (zio->io_abd != NULL)
4755 		abd_free(zio->io_abd);
4756 }
4757 
4758 typedef struct dbuf_remap_impl_callback_arg {
4759 	objset_t	*drica_os;
4760 	uint64_t	drica_blk_birth;
4761 	dmu_tx_t	*drica_tx;
4762 } dbuf_remap_impl_callback_arg_t;
4763 
4764 static void
4765 dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
4766     void *arg)
4767 {
4768 	dbuf_remap_impl_callback_arg_t *drica = arg;
4769 	objset_t *os = drica->drica_os;
4770 	spa_t *spa = dmu_objset_spa(os);
4771 	dmu_tx_t *tx = drica->drica_tx;
4772 
4773 	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4774 
4775 	if (os == spa_meta_objset(spa)) {
4776 		spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
4777 	} else {
4778 		dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
4779 		    size, drica->drica_blk_birth, tx);
4780 	}
4781 }
4782 
4783 static void
4784 dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx)
4785 {
4786 	blkptr_t bp_copy = *bp;
4787 	spa_t *spa = dmu_objset_spa(dn->dn_objset);
4788 	dbuf_remap_impl_callback_arg_t drica;
4789 
4790 	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4791 
4792 	drica.drica_os = dn->dn_objset;
4793 	drica.drica_blk_birth = bp->blk_birth;
4794 	drica.drica_tx = tx;
4795 	if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
4796 	    &drica)) {
4797 		/*
4798 		 * If the blkptr being remapped is tracked by a livelist,
4799 		 * then we need to make sure the livelist reflects the update.
4800 		 * First, cancel out the old blkptr by appending a 'FREE'
4801 		 * entry. Next, add an 'ALLOC' to track the new version. This
4802 		 * way we avoid trying to free an inaccurate blkptr at delete.
4803 		 * Note that embedded blkptrs are not tracked in livelists.
4804 		 */
4805 		if (dn->dn_objset != spa_meta_objset(spa)) {
4806 			dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset);
4807 			if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
4808 			    bp->blk_birth > ds->ds_dir->dd_origin_txg) {
4809 				ASSERT(!BP_IS_EMBEDDED(bp));
4810 				ASSERT(dsl_dir_is_clone(ds->ds_dir));
4811 				ASSERT(spa_feature_is_enabled(spa,
4812 				    SPA_FEATURE_LIVELIST));
4813 				bplist_append(&ds->ds_dir->dd_pending_frees,
4814 				    bp);
4815 				bplist_append(&ds->ds_dir->dd_pending_allocs,
4816 				    &bp_copy);
4817 			}
4818 		}
4819 
4820 		/*
4821 		 * The db_rwlock prevents dbuf_read_impl() from
4822 		 * dereferencing the BP while we are changing it.  To
4823 		 * avoid lock contention, only grab it when we are actually
4824 		 * changing the BP.
4825 		 */
4826 		if (rw != NULL)
4827 			rw_enter(rw, RW_WRITER);
4828 		*bp = bp_copy;
4829 		if (rw != NULL)
4830 			rw_exit(rw);
4831 	}
4832 }
4833 
4834 /*
4835  * Remap any existing BPs to concrete vdevs, if possible.
4836  */
4837 static void
4838 dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
4839 {
4840 	spa_t *spa = dmu_objset_spa(db->db_objset);
4841 	ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4842 
4843 	if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
4844 		return;
4845 
4846 	if (db->db_level > 0) {
4847 		blkptr_t *bp = db->db.db_data;
4848 		for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
4849 			dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx);
4850 		}
4851 	} else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
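		/*
		 * Meta-dnode blocks embed the block pointers of every dnode
		 * they contain; walk each dnode slot and remap its BPs.
		 */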
4852 		dnode_phys_t *dnp = db->db.db_data;
4853 		ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
4854 		    DMU_OT_DNODE);
4855 		for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
4856 		    i += dnp[i].dn_extra_slots + 1) {
4857 			for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
4858 				krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL :
4859 				    &dn->dn_dbuf->db_rwlock);
4860 				dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock,
4861 				    tx);
4862 			}
4863 		}
4864 	}
4865 }
4866 
4867 
4868 /* Issue I/O to commit a dirty buffer to disk. */
4869 static void
4870 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
4871 {
4872 	dmu_buf_impl_t *db = dr->dr_dbuf;
4873 	dnode_t *dn = dr->dr_dnode;
4874 	objset_t *os;
4875 	dmu_buf_impl_t *parent = db->db_parent;
4876 	uint64_t txg = tx->tx_txg;
4877 	zbookmark_phys_t zb;
4878 	zio_prop_t zp;
4879 	zio_t *pio; /* parent I/O */
4880 	int wp_flag = 0;
4881 
4882 	ASSERT(dmu_tx_is_syncing(tx));
4883 
4884 	os = dn->dn_objset;
4885 
4886 	if (db->db_state != DB_NOFILL) {
4887 		if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
4888 			/*
4889 			 * Private object buffers are released here rather
4890 			 * than in dbuf_dirty() since they are only modified
4891 			 * in the syncing context and we don't want the
4892 			 * overhead of making multiple copies of the data.
4893 			 */
4894 			if (BP_IS_HOLE(db->db_blkptr)) {
4895 				arc_buf_thaw(data);
4896 			} else {
4897 				dbuf_release_bp(db);
4898 			}
4899 			dbuf_remap(dn, db, tx);
4900 		}
4901 	}
4902 
4903 	if (parent != dn->dn_dbuf) {
4904 		/* Our parent is an indirect block. */
4905 		/* We have a dirty parent that has been scheduled for write. */
4906 		ASSERT(parent && parent->db_data_pending);
4907 		/* Our parent's buffer is one level closer to the dnode. */
4908 		ASSERT(db->db_level == parent->db_level-1);
4909 		/*
4910 		 * We're about to modify our parent's db_data by modifying
4911 		 * our block pointer, so the parent must be released.
4912 		 */
4913 		ASSERT(arc_released(parent->db_buf));
4914 		pio = parent->db_data_pending->dr_zio;
4915 	} else {
4916 		/* Our parent is the dnode itself. */
4917 		ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
4918 		    db->db_blkid != DMU_SPILL_BLKID) ||
4919 		    (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
4920 		if (db->db_blkid != DMU_SPILL_BLKID)
4921 			ASSERT3P(db->db_blkptr, ==,
4922 			    &dn->dn_phys->dn_blkptr[db->db_blkid]);
4923 		pio = dn->dn_zio;
4924 	}
4925 
4926 	ASSERT(db->db_level == 0 || data == db->db_buf);
4927 	ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
4928 	ASSERT(pio);
4929 
4930 	SET_BOOKMARK(&zb, os->os_dsl_dataset ?
4931 	    os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
4932 	    db->db.db_object, db->db_level, db->db_blkid);
4933 
4934 	if (db->db_blkid == DMU_SPILL_BLKID)
4935 		wp_flag = WP_SPILL;
4936 	wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
4937 
4938 	dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
4939 
4940 	/*
4941 	 * We copy the blkptr now (rather than when we instantiate the dirty
4942 	 * record), because its value can change between open context and
4943 	 * syncing context. We do not need to hold dn_struct_rwlock to read
4944 	 * db_blkptr because we are in syncing context.
4945 	 */
4946 	dr->dr_bp_copy = *db->db_blkptr;
4947 
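	/*
	 * Three cases from here: (1) the block was already written in open
	 * context (by dmu_sync() or as an embedded BP), so the write is
	 * overridden to adopt that BP rather than allocating a new one;
	 * (2) a NOFILL buffer, which has no data to write; (3) the common
	 * case, an ARC write of the dirty buffer.
	 */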
4948 	if (db->db_level == 0 &&
4949 	    dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
4950 		/*
4951 		 * The BP for this block has been provided by open context
4952 		 * (by dmu_sync() or dmu_buf_write_embedded()).
4953 		 */
4954 		abd_t *contents = (data != NULL) ?
4955 		    abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
4956 
4957 		dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy,
4958 		    contents, db->db.db_size, db->db.db_size, &zp,
4959 		    dbuf_write_override_ready, NULL, NULL,
4960 		    dbuf_write_override_done,
4961 		    dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
4962 		mutex_enter(&db->db_mtx);
4963 		dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
4964 		zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
4965 		    dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
4966 		mutex_exit(&db->db_mtx);
4967 	} else if (db->db_state == DB_NOFILL) {
4968 		ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
4969 		    zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
4970 		dr->dr_zio = zio_write(pio, os->os_spa, txg,
4971 		    &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
4972 		    dbuf_write_nofill_ready, NULL, NULL,
4973 		    dbuf_write_nofill_done, db,
4974 		    ZIO_PRIORITY_ASYNC_WRITE,
4975 		    ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
4976 	} else {
4977 		ASSERT(arc_released(data));
4978 
4979 		/*
4980 		 * For indirect blocks, we want to set up the children
4981 		 * ready callback so that we can properly handle an indirect
4982 		 * block that only contains holes.
4983 		 */
4984 		arc_write_done_func_t *children_ready_cb = NULL;
4985 		if (db->db_level != 0)
4986 			children_ready_cb = dbuf_write_children_ready;
4987 
4988 		dr->dr_zio = arc_write(pio, os->os_spa, txg,
4989 		    &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db),
4990 		    &zp, dbuf_write_ready,
4991 		    children_ready_cb, dbuf_write_physdone,
4992 		    dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE,
4993 		    ZIO_FLAG_MUSTSUCCEED, &zb);
4994 	}
4995 }
4996 
4997 EXPORT_SYMBOL(dbuf_find);
4998 EXPORT_SYMBOL(dbuf_is_metadata);
4999 EXPORT_SYMBOL(dbuf_destroy);
5000 EXPORT_SYMBOL(dbuf_loan_arcbuf);
5001 EXPORT_SYMBOL(dbuf_whichblock);
5002 EXPORT_SYMBOL(dbuf_read);
5003 EXPORT_SYMBOL(dbuf_unoverride);
5004 EXPORT_SYMBOL(dbuf_free_range);
5005 EXPORT_SYMBOL(dbuf_new_size);
5006 EXPORT_SYMBOL(dbuf_release_bp);
5007 EXPORT_SYMBOL(dbuf_dirty);
5008 EXPORT_SYMBOL(dmu_buf_set_crypt_params);
5009 EXPORT_SYMBOL(dmu_buf_will_dirty);
5010 EXPORT_SYMBOL(dmu_buf_is_dirty);
5011 EXPORT_SYMBOL(dmu_buf_will_not_fill);
5012 EXPORT_SYMBOL(dmu_buf_will_fill);
5013 EXPORT_SYMBOL(dmu_buf_fill_done);
5014 EXPORT_SYMBOL(dmu_buf_rele);
5015 EXPORT_SYMBOL(dbuf_assign_arcbuf);
5016 EXPORT_SYMBOL(dbuf_prefetch);
5017 EXPORT_SYMBOL(dbuf_hold_impl);
5018 EXPORT_SYMBOL(dbuf_hold);
5019 EXPORT_SYMBOL(dbuf_hold_level);
5020 EXPORT_SYMBOL(dbuf_create_bonus);
5021 EXPORT_SYMBOL(dbuf_spill_set_blksz);
5022 EXPORT_SYMBOL(dbuf_rm_spill);
5023 EXPORT_SYMBOL(dbuf_add_ref);
5024 EXPORT_SYMBOL(dbuf_rele);
5025 EXPORT_SYMBOL(dbuf_rele_and_unlock);
5026 EXPORT_SYMBOL(dbuf_refcount);
5027 EXPORT_SYMBOL(dbuf_sync_list);
5028 EXPORT_SYMBOL(dmu_buf_set_user);
5029 EXPORT_SYMBOL(dmu_buf_set_user_ie);
5030 EXPORT_SYMBOL(dmu_buf_get_user);
5031 EXPORT_SYMBOL(dmu_buf_get_blkptr);
5032 
5033 /* BEGIN CSTYLED */
5034 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, ULONG, ZMOD_RW,
5035 	"Maximum size in bytes of the dbuf cache.");
5036 
5037 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW,
5038 	"Percentage over dbuf_cache_max_bytes when dbufs must be evicted "
5039 	"directly.");
5040 
5041 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW,
5042 	"Percentage below dbuf_cache_max_bytes when the evict thread stops "
5043 	"evicting dbufs.");
5044 
5045 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, ULONG, ZMOD_RW,
5046 	"Maximum size in bytes of the dbuf metadata cache.");
5047 
5048 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, INT, ZMOD_RW,
5049 	"Set the size of the dbuf cache to a log2 fraction of arc size.");
5050 
5051 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, INT, ZMOD_RW,
5052 	"Set the size of the dbuf metadata cache to a log2 fraction of arc "
5053 	"size.");
5054 /* END CSTYLED */
5055