xref: /linux-6.15/drivers/base/regmap/regcache.c (revision 05933e2d)
1 // SPDX-License-Identifier: GPL-2.0
2 //
3 // Register cache access API
4 //
5 // Copyright 2011 Wolfson Microelectronics plc
6 //
7 // Author: Dimitris Papastamos <[email protected]>
8 
9 #include <linux/bsearch.h>
10 #include <linux/device.h>
11 #include <linux/export.h>
12 #include <linux/slab.h>
13 #include <linux/sort.h>
14 
15 #include "trace.h"
16 #include "internal.h"
17 
/*
 * Table of all available cache backends; regcache_init() matches
 * map->cache_type against each entry's ->type to pick one.
 */
static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
	&regcache_flat_ops,
};
22 
23 static int regcache_hw_init(struct regmap *map)
24 {
25 	int i, j;
26 	int ret;
27 	int count;
28 	unsigned int reg, val;
29 	void *tmp_buf;
30 
31 	if (!map->num_reg_defaults_raw)
32 		return -EINVAL;
33 
34 	/* calculate the size of reg_defaults */
35 	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++)
36 		if (regmap_readable(map, i * map->reg_stride) &&
37 		    !regmap_volatile(map, i * map->reg_stride))
38 			count++;
39 
40 	/* all registers are unreadable or volatile, so just bypass */
41 	if (!count) {
42 		map->cache_bypass = true;
43 		return 0;
44 	}
45 
46 	map->num_reg_defaults = count;
47 	map->reg_defaults = kmalloc_array(count, sizeof(struct reg_default),
48 					  GFP_KERNEL);
49 	if (!map->reg_defaults)
50 		return -ENOMEM;
51 
52 	if (!map->reg_defaults_raw) {
53 		bool cache_bypass = map->cache_bypass;
54 		dev_warn(map->dev, "No cache defaults, reading back from HW\n");
55 
56 		/* Bypass the cache access till data read from HW */
57 		map->cache_bypass = true;
58 		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
59 		if (!tmp_buf) {
60 			ret = -ENOMEM;
61 			goto err_free;
62 		}
63 		ret = regmap_raw_read(map, 0, tmp_buf,
64 				      map->cache_size_raw);
65 		map->cache_bypass = cache_bypass;
66 		if (ret == 0) {
67 			map->reg_defaults_raw = tmp_buf;
68 			map->cache_free = true;
69 		} else {
70 			kfree(tmp_buf);
71 		}
72 	}
73 
74 	/* fill the reg_defaults */
75 	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
76 		reg = i * map->reg_stride;
77 
78 		if (!regmap_readable(map, reg))
79 			continue;
80 
81 		if (regmap_volatile(map, reg))
82 			continue;
83 
84 		if (map->reg_defaults_raw) {
85 			val = regcache_get_val(map, map->reg_defaults_raw, i);
86 		} else {
87 			bool cache_bypass = map->cache_bypass;
88 
89 			map->cache_bypass = true;
90 			ret = regmap_read(map, reg, &val);
91 			map->cache_bypass = cache_bypass;
92 			if (ret != 0) {
93 				dev_err(map->dev, "Failed to read %d: %d\n",
94 					reg, ret);
95 				goto err_free;
96 			}
97 		}
98 
99 		map->reg_defaults[j].reg = reg;
100 		map->reg_defaults[j].def = val;
101 		j++;
102 	}
103 
104 	return 0;
105 
106 err_free:
107 	kfree(map->reg_defaults);
108 
109 	return ret;
110 }
111 
112 int regcache_init(struct regmap *map, const struct regmap_config *config)
113 {
114 	int ret;
115 	int i;
116 	void *tmp_buf;
117 
118 	if (map->cache_type == REGCACHE_NONE) {
119 		if (config->reg_defaults || config->num_reg_defaults_raw)
120 			dev_warn(map->dev,
121 				 "No cache used with register defaults set!\n");
122 
123 		map->cache_bypass = true;
124 		return 0;
125 	}
126 
127 	if (config->reg_defaults && !config->num_reg_defaults) {
128 		dev_err(map->dev,
129 			 "Register defaults are set without the number!\n");
130 		return -EINVAL;
131 	}
132 
133 	if (config->num_reg_defaults && !config->reg_defaults) {
134 		dev_err(map->dev,
135 			"Register defaults number are set without the reg!\n");
136 		return -EINVAL;
137 	}
138 
139 	for (i = 0; i < config->num_reg_defaults; i++)
140 		if (config->reg_defaults[i].reg % map->reg_stride)
141 			return -EINVAL;
142 
143 	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
144 		if (cache_types[i]->type == map->cache_type)
145 			break;
146 
147 	if (i == ARRAY_SIZE(cache_types)) {
148 		dev_err(map->dev, "Could not match cache type: %d\n",
149 			map->cache_type);
150 		return -EINVAL;
151 	}
152 
153 	map->num_reg_defaults = config->num_reg_defaults;
154 	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
155 	map->reg_defaults_raw = config->reg_defaults_raw;
156 	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
157 	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
158 
159 	map->cache = NULL;
160 	map->cache_ops = cache_types[i];
161 
162 	if (!map->cache_ops->read ||
163 	    !map->cache_ops->write ||
164 	    !map->cache_ops->name)
165 		return -EINVAL;
166 
167 	/* We still need to ensure that the reg_defaults
168 	 * won't vanish from under us.  We'll need to make
169 	 * a copy of it.
170 	 */
171 	if (config->reg_defaults) {
172 		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
173 				  sizeof(struct reg_default), GFP_KERNEL);
174 		if (!tmp_buf)
175 			return -ENOMEM;
176 		map->reg_defaults = tmp_buf;
177 	} else if (map->num_reg_defaults_raw) {
178 		/* Some devices such as PMICs don't have cache defaults,
179 		 * we cope with this by reading back the HW registers and
180 		 * crafting the cache defaults by hand.
181 		 */
182 		ret = regcache_hw_init(map);
183 		if (ret < 0)
184 			return ret;
185 		if (map->cache_bypass)
186 			return 0;
187 	}
188 
189 	if (!map->max_register && map->num_reg_defaults_raw)
190 		map->max_register = (map->num_reg_defaults_raw  - 1) * map->reg_stride;
191 
192 	if (map->cache_ops->init) {
193 		dev_dbg(map->dev, "Initializing %s cache\n",
194 			map->cache_ops->name);
195 		ret = map->cache_ops->init(map);
196 		if (ret)
197 			goto err_free;
198 	}
199 	return 0;
200 
201 err_free:
202 	kfree(map->reg_defaults);
203 	if (map->cache_free)
204 		kfree(map->reg_defaults_raw);
205 
206 	return ret;
207 }
208 
/*
 * regcache_exit() - Tear down register caching for a map.
 * @map: map being destroyed.
 *
 * Frees the defaults tables and invokes the backend's exit hook.
 */
void regcache_exit(struct regmap *map)
{
	/* Nothing was set up if caching was disabled. */
	if (map->cache_type == REGCACHE_NONE)
		return;

	BUG_ON(!map->cache_ops);

	kfree(map->reg_defaults);
	/* reg_defaults_raw is only ours if it was read back from HW. */
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n",
			map->cache_ops->name);
		map->cache_ops->exit(map);
	}
}
226 
227 /**
228  * regcache_read - Fetch the value of a given register from the cache.
229  *
230  * @map: map to configure.
231  * @reg: The register index.
232  * @value: The value to be returned.
233  *
234  * Return a negative value on failure, 0 on success.
235  */
236 int regcache_read(struct regmap *map,
237 		  unsigned int reg, unsigned int *value)
238 {
239 	int ret;
240 
241 	if (map->cache_type == REGCACHE_NONE)
242 		return -EINVAL;
243 
244 	BUG_ON(!map->cache_ops);
245 
246 	if (!regmap_volatile(map, reg)) {
247 		ret = map->cache_ops->read(map, reg, value);
248 
249 		if (ret == 0)
250 			trace_regmap_reg_read_cache(map, reg, *value);
251 
252 		return ret;
253 	}
254 
255 	return -EINVAL;
256 }
257 
258 /**
259  * regcache_write - Set the value of a given register in the cache.
260  *
261  * @map: map to configure.
262  * @reg: The register index.
263  * @value: The new register value.
264  *
265  * Return a negative value on failure, 0 on success.
266  */
267 int regcache_write(struct regmap *map,
268 		   unsigned int reg, unsigned int value)
269 {
270 	if (map->cache_type == REGCACHE_NONE)
271 		return 0;
272 
273 	BUG_ON(!map->cache_ops);
274 
275 	if (!regmap_volatile(map, reg))
276 		return map->cache_ops->write(map, reg, value);
277 
278 	return 0;
279 }
280 
281 static bool regcache_reg_needs_sync(struct regmap *map, unsigned int reg,
282 				    unsigned int val)
283 {
284 	int ret;
285 
286 	/* If we don't know the chip just got reset, then sync everything. */
287 	if (!map->no_sync_defaults)
288 		return true;
289 
290 	/* Is this the hardware default?  If so skip. */
291 	ret = regcache_lookup_reg(map, reg);
292 	if (ret >= 0 && val == map->reg_defaults[ret].def)
293 		return false;
294 	return true;
295 }
296 
/*
 * regcache_default_sync() - Generic register-by-register sync.
 * @map: map to sync.
 * @min: first register to sync.
 * @max: last register to sync.
 *
 * Fallback used when the cache backend provides no ->sync operation:
 * walk every register in [min, max] and write back any cached value
 * that needs it.  Returns 0 on success or a negative errno.
 */
static int regcache_default_sync(struct regmap *map, unsigned int min,
				 unsigned int max)
{
	unsigned int reg;

	for (reg = min; reg <= max; reg += map->reg_stride) {
		unsigned int val;
		int ret;

		/* Volatile or unwriteable registers are never synced. */
		if (regmap_volatile(map, reg) ||
		    !regmap_writeable(map, reg))
			continue;

		/* -ENOENT means the register simply isn't cached. */
		ret = regcache_read(map, reg, &val);
		if (ret == -ENOENT)
			continue;
		if (ret)
			return ret;

		/* Skip registers still at their hardware default. */
		if (!regcache_reg_needs_sync(map, reg, val))
			continue;

		/* Bypass the cache so the write reaches the hardware. */
		map->cache_bypass = true;
		ret = _regmap_write(map, reg, val);
		map->cache_bypass = false;
		if (ret) {
			dev_err(map->dev, "Unable to sync register %#x. %d\n",
				reg, ret);
			return ret;
		}
		dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
	}

	return 0;
}
332 
/**
 * regcache_sync - Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int i;
	const char *name;
	bool bypass;

	if (WARN_ON(map->cache_type == REGCACHE_NONE))
		return -EINVAL;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
	trace_regcache_sync(map, name, "start");

	/* Nothing to do if no write happened since the last sync. */
	if (!map->cache_dirty)
		goto out;

	/* Batch writes asynchronously where the bus supports it. */
	map->async = true;

	/* Apply any patch first */
	map->cache_bypass = true;
	for (i = 0; i < map->patch_regs; i++) {
		ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				map->patch[i].reg, map->patch[i].def, ret);
			/* cache_bypass is restored at "out" below */
			goto out;
		}
	}
	map->cache_bypass = false;

	/* Prefer the backend's sync; fall back to the generic walk. */
	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, 0, map->max_register);
	else
		ret = regcache_default_sync(map, 0, map->max_register);

	if (ret == 0)
		map->cache_dirty = false;

out:
	/* Restore the bypass state */
	map->async = false;
	map->cache_bypass = bypass;
	/* Defaults-skipping only applies to the first sync after reset. */
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	/* Wait for queued async writes outside the lock. */
	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);
403 
/**
 * regcache_sync_region - Sync part  of the register cache with the hardware.
 *
 * @map: map to sync.
 * @min: first register to sync
 * @max: last register to sync
 *
 * Write all non-default register values in the specified region to
 * the hardware.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync_region(struct regmap *map, unsigned int min,
			 unsigned int max)
{
	int ret = 0;
	const char *name;
	bool bypass;

	if (WARN_ON(map->cache_type == REGCACHE_NONE))
		return -EINVAL;

	BUG_ON(!map->cache_ops);

	map->lock(map->lock_arg);

	/* Remember the initial bypass state */
	bypass = map->cache_bypass;

	name = map->cache_ops->name;
	dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);

	trace_regcache_sync(map, name, "start region");

	/* Nothing to do if no write happened since the last sync. */
	if (!map->cache_dirty)
		goto out;

	/* Batch writes asynchronously where the bus supports it. */
	map->async = true;

	/* Prefer the backend's sync; fall back to the generic walk. */
	if (map->cache_ops->sync)
		ret = map->cache_ops->sync(map, min, max);
	else
		ret = regcache_default_sync(map, min, max);

out:
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	map->async = false;
	/* Defaults-skipping only applies to the first sync after reset. */
	map->no_sync_defaults = false;
	map->unlock(map->lock_arg);

	/* Wait for queued async writes outside the lock. */
	regmap_async_complete(map);

	trace_regcache_sync(map, name, "stop region");

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);
462 
463 /**
464  * regcache_drop_region - Discard part of the register cache
465  *
466  * @map: map to operate on
467  * @min: first register to discard
468  * @max: last register to discard
469  *
470  * Discard part of the register cache.
471  *
472  * Return a negative value on failure, 0 on success.
473  */
474 int regcache_drop_region(struct regmap *map, unsigned int min,
475 			 unsigned int max)
476 {
477 	int ret = 0;
478 
479 	if (!map->cache_ops || !map->cache_ops->drop)
480 		return -EINVAL;
481 
482 	map->lock(map->lock_arg);
483 
484 	trace_regcache_drop_region(map, min, max);
485 
486 	ret = map->cache_ops->drop(map, min, max);
487 
488 	map->unlock(map->lock_arg);
489 
490 	return ret;
491 }
492 EXPORT_SYMBOL_GPL(regcache_drop_region);
493 
/**
 * regcache_cache_only - Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if changes should be written to the hardware
 *
 * When a register map is marked as cache only writes to the register
 * map API will only update the register cache, they will not cause
 * any hardware changes.  This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	/* cache-only and cache-bypass are mutually exclusive. */
	WARN_ON(map->cache_type != REGCACHE_NONE &&
		map->cache_bypass && enable);
	map->cache_only = enable;
	trace_regmap_cache_only(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
516 
/**
 * regcache_mark_dirty - Indicate that HW registers were reset to default values
 *
 * @map: map to mark
 *
 * Inform regcache that the device has been powered down or reset, so that
 * on resume, regcache_sync() knows to write out all non-default values
 * stored in the cache.
 *
 * If this function is not called, regcache_sync() will assume that
 * the hardware state still matches the cache state, modulo any writes that
 * happened when cache_only was true.
 */
void regcache_mark_dirty(struct regmap *map)
{
	map->lock(map->lock_arg);
	map->cache_dirty = true;
	/* Lets the next sync skip registers at their hardware defaults. */
	map->no_sync_defaults = true;
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);
538 
/**
 * regcache_cache_bypass - Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if changes should not be written to the cache
 *
 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not
 * the cache directly.  This is useful when syncing the cache back to
 * the hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	map->lock(map->lock_arg);
	/* cache-only and cache-bypass are mutually exclusive. */
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
	trace_regmap_cache_bypass(map, enable);
	map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
559 
560 bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
561 		      unsigned int val)
562 {
563 	if (regcache_get_val(map, base, idx) == val)
564 		return true;
565 
566 	/* Use device native format if possible */
567 	if (map->format.format_val) {
568 		map->format.format_val(base + (map->cache_word_size * idx),
569 				       val, 0);
570 		return false;
571 	}
572 
573 	switch (map->cache_word_size) {
574 	case 1: {
575 		u8 *cache = base;
576 
577 		cache[idx] = val;
578 		break;
579 	}
580 	case 2: {
581 		u16 *cache = base;
582 
583 		cache[idx] = val;
584 		break;
585 	}
586 	case 4: {
587 		u32 *cache = base;
588 
589 		cache[idx] = val;
590 		break;
591 	}
592 #ifdef CONFIG_64BIT
593 	case 8: {
594 		u64 *cache = base;
595 
596 		cache[idx] = val;
597 		break;
598 	}
599 #endif
600 	default:
601 		BUG();
602 	}
603 	return false;
604 }
605 
606 unsigned int regcache_get_val(struct regmap *map, const void *base,
607 			      unsigned int idx)
608 {
609 	if (!base)
610 		return -EINVAL;
611 
612 	/* Use device native format if possible */
613 	if (map->format.parse_val)
614 		return map->format.parse_val(regcache_get_val_addr(map, base,
615 								   idx));
616 
617 	switch (map->cache_word_size) {
618 	case 1: {
619 		const u8 *cache = base;
620 
621 		return cache[idx];
622 	}
623 	case 2: {
624 		const u16 *cache = base;
625 
626 		return cache[idx];
627 	}
628 	case 4: {
629 		const u32 *cache = base;
630 
631 		return cache[idx];
632 	}
633 #ifdef CONFIG_64BIT
634 	case 8: {
635 		const u64 *cache = base;
636 
637 		return cache[idx];
638 	}
639 #endif
640 	default:
641 		BUG();
642 	}
643 	/* unreachable */
644 	return -1;
645 }
646 
647 static int regcache_default_cmp(const void *a, const void *b)
648 {
649 	const struct reg_default *_a = a;
650 	const struct reg_default *_b = b;
651 
652 	return _a->reg - _b->reg;
653 }
654 
655 int regcache_lookup_reg(struct regmap *map, unsigned int reg)
656 {
657 	struct reg_default key;
658 	struct reg_default *r;
659 
660 	key.reg = reg;
661 	key.def = 0;
662 
663 	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
664 		    sizeof(struct reg_default), regcache_default_cmp);
665 
666 	if (r)
667 		return r - map->reg_defaults;
668 	else
669 		return -ENOENT;
670 }
671 
672 static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
673 {
674 	if (!cache_present)
675 		return true;
676 
677 	return test_bit(idx, cache_present);
678 }
679 
/*
 * regcache_sync_val() - Write one cached value back to the hardware.
 * @map: map being synced.
 * @reg: register address.
 * @val: cached value.
 *
 * Skips the write if the value is already the hardware default after a
 * known reset.  Returns 0 on success or a negative errno.
 */
int regcache_sync_val(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (!regcache_reg_needs_sync(map, reg, val))
		return 0;

	/* Bypass the cache so the write reaches the hardware. */
	map->cache_bypass = true;

	ret = _regmap_write(map, reg, val);

	map->cache_bypass = false;

	if (ret != 0) {
		dev_err(map->dev, "Unable to sync register %#x. %d\n",
			reg, ret);
		return ret;
	}
	dev_dbg(map->dev, "Synced register %#x, value %#x\n",
		reg, val);

	return 0;
}
703 
704 static int regcache_sync_block_single(struct regmap *map, void *block,
705 				      unsigned long *cache_present,
706 				      unsigned int block_base,
707 				      unsigned int start, unsigned int end)
708 {
709 	unsigned int i, regtmp, val;
710 	int ret;
711 
712 	for (i = start; i < end; i++) {
713 		regtmp = block_base + (i * map->reg_stride);
714 
715 		if (!regcache_reg_present(cache_present, i) ||
716 		    !regmap_writeable(map, regtmp))
717 			continue;
718 
719 		val = regcache_get_val(map, block, i);
720 		ret = regcache_sync_val(map, regtmp, val);
721 		if (ret != 0)
722 			return ret;
723 	}
724 
725 	return 0;
726 }
727 
/*
 * regcache_sync_block_raw_flush() - Write out an accumulated raw run.
 * @map: map being synced.
 * @data: in/out pointer to the start of the pending run in the cache
 *        block; reset to NULL after the flush.
 * @base: register address of the run's first entry.
 * @cur: register address one stride past the run's last entry.
 *
 * No-op (returns 0) when no run is pending.  Returns 0 on success or a
 * negative errno from the raw write.
 */
static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
					 unsigned int base, unsigned int cur)
{
	size_t val_bytes = map->format.val_bytes;
	int ret, count;

	if (*data == NULL)
		return 0;

	count = (cur - base) / map->reg_stride;

	dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
		count * val_bytes, count, base, cur - map->reg_stride);

	/* Bypass the cache so the raw write reaches the hardware. */
	map->cache_bypass = true;

	ret = _regmap_raw_write(map, base, *data, count * val_bytes, false);
	if (ret)
		dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n",
			base, cur - map->reg_stride, ret);

	map->cache_bypass = false;

	/* Run consumed; the caller starts accumulating afresh. */
	*data = NULL;

	return ret;
}
755 
/*
 * regcache_sync_block_raw() - Sync a cache block using batched raw writes.
 * @map: map being synced.
 * @block: raw cache block holding the values.
 * @cache_present: optional presence bitmap for the block.
 * @block_base: register address of the block's first entry.
 * @start: first block index to sync.
 * @end: one past the last block index to sync.
 *
 * Accumulates consecutive registers that need syncing into one run and
 * flushes each run with a single raw write.  Returns 0 on success or a
 * negative errno.
 */
static int regcache_sync_block_raw(struct regmap *map, void *block,
			    unsigned long *cache_present,
			    unsigned int block_base, unsigned int start,
			    unsigned int end)
{
	unsigned int i, val;
	unsigned int regtmp = 0;
	unsigned int base = 0;
	const void *data = NULL;
	int ret;

	for (i = start; i < end; i++) {
		regtmp = block_base + (i * map->reg_stride);

		/* A gap (uncached or unwriteable) ends the current run. */
		if (!regcache_reg_present(cache_present, i) ||
		    !regmap_writeable(map, regtmp)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		/* A register already at its default also ends the run. */
		val = regcache_get_val(map, block, i);
		if (!regcache_reg_needs_sync(map, regtmp, val)) {
			ret = regcache_sync_block_raw_flush(map, &data,
							    base, regtmp);
			if (ret != 0)
				return ret;
			continue;
		}

		/* Start a new run at the first register that needs sync. */
		if (!data) {
			data = regcache_get_val_addr(map, block, i);
			base = regtmp;
		}
	}

	/* Flush whatever run is still pending at the end of the block. */
	return regcache_sync_block_raw_flush(map, &data, base, regtmp +
			map->reg_stride);
}
797 
798 int regcache_sync_block(struct regmap *map, void *block,
799 			unsigned long *cache_present,
800 			unsigned int block_base, unsigned int start,
801 			unsigned int end)
802 {
803 	if (regmap_can_raw_write(map) && !map->use_single_write)
804 		return regcache_sync_block_raw(map, block, cache_present,
805 					       block_base, start, end);
806 	else
807 		return regcache_sync_block_single(map, block, cache_present,
808 						  block_base, start, end);
809 }
810