// SPDX-License-Identifier: GPL-2.0
//
// Register cache access API - maple tree based cache
//
// Copyright 2023 Arm, Ltd
//
// Author: Mark Brown <[email protected]>

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/maple_tree.h>
#include <linux/slab.h>

#include "internal.h"

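/*
 * Look up a single register value in the maple tree cache.  Each tree
 * entry is an array of values covering a contiguous range of
 * registers, so a hit returns the element at the register's offset
 * within that range.
 */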
static int regcache_maple_read(struct regmap *map,
			       unsigned int reg, unsigned int *value)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry;

	rcu_read_lock();

	entry = mas_walk(&mas);
	if (!entry) {
		rcu_read_unlock();
		return -ENOENT;
	}

	*value = entry[reg - mas.index];

	rcu_read_unlock();

	return 0;
}

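/*
 * Update or insert a single register value.  If the register already
 * lies within a cached range the value is updated in place, otherwise
 * a new entry is created, merging with any adjacent entries so that
 * contiguous registers continue to share a single array.
 */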
static int regcache_maple_write(struct regmap *map, unsigned int reg,
				unsigned int val)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, reg, reg);
	unsigned long *entry, *upper, *lower;
	unsigned long index, last;
	size_t lower_sz, upper_sz;
	int ret;

	rcu_read_lock();

	entry = mas_walk(&mas);
	if (entry) {
		entry[reg - mas.index] = val;
		rcu_read_unlock();
		return 0;
	}

	/* Any adjacent entries to extend/merge? */
	mas_set_range(&mas, reg - 1, reg + 1);
	index = reg;
	last = reg;

	lower = mas_find(&mas, reg - 1);
	if (lower) {
		index = mas.index;
		lower_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}

	upper = mas_find(&mas, reg + 1);
	if (upper) {
		last = mas.last;
		upper_sz = (mas.last - mas.index + 1) * sizeof(unsigned long);
	}

	rcu_read_unlock();

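	/*
	 * Allocate an entry spanning the merged range and copy in the
	 * data from any adjacent entries.
	 */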
	entry = kmalloc_array(last - index + 1, sizeof(*entry), map->alloc_flags);
	if (!entry)
		return -ENOMEM;

	if (lower)
		memcpy(entry, lower, lower_sz);
	entry[reg - index] = val;
	if (upper)
		memcpy(&entry[reg - index + 1], upper, upper_sz);

	/*
	 * This is safe because the regmap lock means the Maple lock
	 * is redundant, but we need to take it due to lockdep asserts
	 * in the maple tree code.
	 */
	mas_lock(&mas);

	mas_set_range(&mas, index, last);
	ret = mas_store_gfp(&mas, entry, map->alloc_flags);

	mas_unlock(&mas);

	if (ret == 0) {
		kfree(lower);
		kfree(upper);
	}

	return ret;
}

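/*
 * Discard any cached values between min and max inclusive.  Entries
 * that straddle the boundaries are split, with the portions outside
 * the dropped window preserved in new entries.
 */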
static int regcache_maple_drop(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, min, max);
	unsigned long *entry, *lower, *upper;
	/* initialized to work around false-positive -Wuninitialized warning */
	unsigned long lower_index = 0, lower_last = 0;
	unsigned long upper_index, upper_last;
	int ret = 0;

	lower = NULL;
	upper = NULL;

	mas_lock(&mas);

	mas_for_each(&mas, entry, max) {
		/*
		 * This is safe because the regmap lock means the
		 * Maple lock is redundant, but we need to take it due
		 * to lockdep asserts in the maple tree code.
		 */
		mas_unlock(&mas);

		/* Do we need to save any of this entry? */
		if (mas.index < min) {
			lower_index = mas.index;
			lower_last = min - 1;

			lower = kmemdup_array(entry,
					      min - mas.index, sizeof(*lower),
					      map->alloc_flags);
			if (!lower) {
				ret = -ENOMEM;
				goto out_unlocked;
			}
		}

		if (mas.last > max) {
			upper_index = max + 1;
			upper_last = mas.last;

			upper = kmemdup_array(&entry[max - mas.index + 1],
					      mas.last - max, sizeof(*upper),
					      map->alloc_flags);
			if (!upper) {
				ret = -ENOMEM;
				goto out_unlocked;
			}
		}

		kfree(entry);
		mas_lock(&mas);
		mas_erase(&mas);

		/* Insert new nodes with the saved data */
		if (lower) {
			mas_set_range(&mas, lower_index, lower_last);
			ret = mas_store_gfp(&mas, lower, map->alloc_flags);
			if (ret != 0)
				goto out;
			lower = NULL;
		}

		if (upper) {
			mas_set_range(&mas, upper_index, upper_last);
			ret = mas_store_gfp(&mas, upper, map->alloc_flags);
			if (ret != 0)
				goto out;
			upper = NULL;
		}
	}

out:
	mas_unlock(&mas);
out_unlocked:
	kfree(lower);
	kfree(upper);

	return ret;
}

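/*
 * Write the registers [min, max) from a single cache entry back to the
 * device.  The RCU read lock is dropped around the device access since
 * the write may sleep; mas_pause() keeps the maple state valid so the
 * caller can resume its walk afterwards.
 */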
static int regcache_maple_sync_block(struct regmap *map, unsigned long *entry,
				     struct ma_state *mas,
				     unsigned int min, unsigned int max)
{
	void *buf;
	unsigned long r;
	size_t val_bytes = map->format.val_bytes;
	int ret = 0;

	mas_pause(mas);
	rcu_read_unlock();

	/*
	 * Use a raw write if writing more than one register to a
	 * device that supports raw writes to reduce transaction
	 * overheads.
	 */
	if (max - min > 1 && regmap_can_raw_write(map)) {
		buf = kmalloc_array(max - min, val_bytes, map->alloc_flags);
		if (!buf) {
			ret = -ENOMEM;
			goto out;
		}

		/* Render the data for a raw write */
		for (r = min; r < max; r++) {
			regcache_set_val(map, buf, r - min,
					 entry[r - mas->index]);
		}

		ret = _regmap_raw_write(map, min, buf, (max - min) * val_bytes,
					false);

		kfree(buf);
	} else {
		for (r = min; r < max; r++) {
			ret = _regmap_write(map, r,
					    entry[r - mas->index]);
			if (ret != 0)
				goto out;
		}
	}

out:
	rcu_read_lock();

	return ret;
}

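/*
 * Sync cached values in [min, max] to the device, coalescing runs of
 * consecutive registers that need syncing so they can be written as
 * single raw transfers where possible.
 */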
static int regcache_maple_sync(struct regmap *map, unsigned int min,
			       unsigned int max)
{
	struct maple_tree *mt = map->cache;
	unsigned long *entry;
	MA_STATE(mas, mt, min, max);
	unsigned long lmin = min;
	unsigned long lmax = max;
	unsigned int r, v, sync_start;
	int ret = 0;
	bool sync_needed = false;

	map->cache_bypass = true;

	rcu_read_lock();

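	/*
	 * Walk each cached range, accumulating consecutive registers
	 * that need syncing into a block and flushing that block when
	 * the run is broken or the range ends.
	 */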
	mas_for_each(&mas, entry, max) {
		for (r = max(mas.index, lmin); r <= min(mas.last, lmax); r++) {
			v = entry[r - mas.index];

			if (regcache_reg_needs_sync(map, r, v)) {
				if (!sync_needed) {
					sync_start = r;
					sync_needed = true;
				}
				continue;
			}

			if (!sync_needed)
				continue;

			ret = regcache_maple_sync_block(map, entry, &mas,
							sync_start, r);
			if (ret != 0)
				goto out;
			sync_needed = false;
		}

		if (sync_needed) {
			ret = regcache_maple_sync_block(map, entry, &mas,
							sync_start, r);
			if (ret != 0)
				goto out;
			sync_needed = false;
		}
	}

out:
	rcu_read_unlock();

	map->cache_bypass = false;

	return ret;
}

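/*
 * Free all cache entries and the tree itself.  Safe to call more than
 * once; it does nothing if the cache has already been torn down.
 */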
static int regcache_maple_exit(struct regmap *map)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, 0, UINT_MAX);
	unsigned int *entry;

	/* if we've already been called then just return */
	if (!mt)
		return 0;

	mas_lock(&mas);
	mas_for_each(&mas, entry, UINT_MAX)
		kfree(entry);
	__mt_destroy(mt);
	mas_unlock(&mas);

	kfree(mt);
	map->cache = NULL;

	return 0;
}

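/*
 * Store one tree entry covering the contiguous run of register
 * defaults between indices first and last in map->reg_defaults.
 */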
static int regcache_maple_insert_block(struct regmap *map, int first,
					int last)
{
	struct maple_tree *mt = map->cache;
	MA_STATE(mas, mt, first, last);
	unsigned long *entry;
	int i, ret;

	entry = kmalloc_array(last - first + 1, sizeof(*entry), map->alloc_flags);
	if (!entry)
		return -ENOMEM;

	for (i = 0; i < last - first + 1; i++)
		entry[i] = map->reg_defaults[first + i].def;

	mas_lock(&mas);

	mas_set_range(&mas, map->reg_defaults[first].reg,
		      map->reg_defaults[last].reg);
	ret = mas_store_gfp(&mas, entry, map->alloc_flags);

	mas_unlock(&mas);

	if (ret)
		kfree(entry);

	return ret;
}

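/*
 * Allocate the maple tree and seed it from the register defaults,
 * grouping runs of consecutive register addresses into single entries.
 */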
static int regcache_maple_init(struct regmap *map)
{
	struct maple_tree *mt;
	int i;
	int ret;
	int range_start;

	mt = kmalloc(sizeof(*mt), map->alloc_flags);
	if (!mt)
		return -ENOMEM;
	map->cache = mt;

	mt_init(mt);

	if (!mt_external_lock(mt) && map->lock_key)
		lockdep_set_class_and_subclass(&mt->ma_lock, map->lock_key, 1);

	if (!map->num_reg_defaults)
		return 0;

	range_start = 0;

	/* Scan for ranges of contiguous registers */
	for (i = 1; i < map->num_reg_defaults; i++) {
		if (map->reg_defaults[i].reg !=
		    map->reg_defaults[i - 1].reg + 1) {
			ret = regcache_maple_insert_block(map, range_start,
							  i - 1);
			if (ret != 0)
				goto err;

			range_start = i;
		}
	}

	/* Add the last block */
	ret = regcache_maple_insert_block(map, range_start,
					  map->num_reg_defaults - 1);
	if (ret != 0)
		goto err;

	return 0;

err:
	regcache_maple_exit(map);
	return ret;
}

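/* Cache operations for REGCACHE_MAPLE, used by the regcache core */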
struct regcache_ops regcache_maple_ops = {
	.type = REGCACHE_MAPLE,
	.name = "maple",
	.init = regcache_maple_init,
	.exit = regcache_maple_exit,
	.read = regcache_maple_read,
	.write = regcache_maple_write,
	.drop = regcache_maple_drop,
	.sync = regcache_maple_sync,
};