xref: /linux-6.15/include/linux/host1x.h (revision fe696ccb)
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
4  */
5 
6 #ifndef __LINUX_HOST1X_H
7 #define __LINUX_HOST1X_H
8 
9 #include <linux/device.h>
10 #include <linux/dma-direction.h>
11 #include <linux/spinlock.h>
12 #include <linux/types.h>
13 
/*
 * enum host1x_class - client module class IDs
 *
 * Hardware class numbers identifying the individual engines reachable
 * through host1x (2D, 3D, VIC, NVDEC, ...) — presumably encoded into
 * SETCLASS opcodes by the channel code; confirm against the host1x core.
 */
enum host1x_class {
	HOST1X_CLASS_HOST1X = 0x1,
	HOST1X_CLASS_GR2D = 0x51,
	HOST1X_CLASS_GR2D_SB = 0x52,
	HOST1X_CLASS_VIC = 0x5D,
	HOST1X_CLASS_GR3D = 0x60,
	HOST1X_CLASS_NVDEC = 0xF0,
	HOST1X_CLASS_NVDEC1 = 0xF5,
};
23 
24 struct host1x;
25 struct host1x_client;
26 struct iommu_group;
27 
28 u64 host1x_get_dma_mask(struct host1x *host1x);
29 
/**
 * struct host1x_bo_cache - host1x buffer object cache
 * @mappings: list of cached mappings (struct host1x_bo_mapping, linked
 *            via their @entry field)
 * @lock: synchronizes accesses to the list of mappings
 */
struct host1x_bo_cache {
	struct list_head mappings;
	struct mutex lock;
};
39 
40 static inline void host1x_bo_cache_init(struct host1x_bo_cache *cache)
41 {
42 	INIT_LIST_HEAD(&cache->mappings);
43 	mutex_init(&cache->lock);
44 }
45 
static inline void host1x_bo_cache_destroy(struct host1x_bo_cache *cache)
{
	/*
	 * XXX: should this WARN if @mappings is not empty? Tearing down the
	 * cache while mappings are still cached would leak them.
	 */
	mutex_destroy(&cache->lock);
}
51 
/**
 * struct host1x_client_ops - host1x client operations
 * @early_init: host1x client early initialization code
 * @init: host1x client initialization code
 * @exit: host1x client tear down code
 * @late_exit: host1x client late tear down code
 * @suspend: host1x client suspend code
 * @resume: host1x client resume code
 *
 * NOTE(review): callbacks presumably return 0 on success or a negative
 * error code, per the usual kernel convention — confirm against the
 * host1x core before relying on this.
 */
struct host1x_client_ops {
	int (*early_init)(struct host1x_client *client);
	int (*init)(struct host1x_client *client);
	int (*exit)(struct host1x_client *client);
	int (*late_exit)(struct host1x_client *client);
	int (*suspend)(struct host1x_client *client);
	int (*resume)(struct host1x_client *client);
};
69 
/**
 * struct host1x_client - host1x client structure
 * @list: list node for the host1x client
 * @host: pointer to struct device representing the host1x controller
 * @dev: pointer to struct device backing this host1x client
 * @group: IOMMU group that this client is a member of
 * @ops: host1x client operations
 * @class: host1x class represented by this client
 * @channel: host1x channel associated with this client
 * @syncpts: array of syncpoints requested for this client
 * @num_syncpts: number of syncpoints requested for this client
 * @parent: pointer to parent structure — NOTE(review): presumably NULL
 *          for topmost clients; verify in the host1x core
 * @usecount: reference count for this structure (protected by @lock —
 *            TODO confirm)
 * @lock: mutex for mutually exclusive concurrency
 * @cache: host1x buffer object cache
 */
struct host1x_client {
	struct list_head list;
	struct device *host;
	struct device *dev;
	struct iommu_group *group;

	const struct host1x_client_ops *ops;

	enum host1x_class class;
	struct host1x_channel *channel;

	struct host1x_syncpt **syncpts;
	unsigned int num_syncpts;

	struct host1x_client *parent;
	unsigned int usecount;
	struct mutex lock;

	struct host1x_bo_cache cache;
};
106 
107 /*
108  * host1x buffer objects
109  */
110 
111 struct host1x_bo;
112 struct sg_table;
113 
/**
 * struct host1x_bo_mapping - mapping of a buffer object into a device
 * @ref: reference count
 * @attach: dma-buf attachment backing this mapping, when the buffer was
 *          imported — NOTE(review): presumably NULL otherwise; confirm
 *          in the pinning code
 * @direction: DMA data direction of the mapping
 * @list: list node in the buffer object's list of mappings — presumably
 *        &host1x_bo.mappings; verify in the host1x core
 * @bo: buffer object that this structure maps
 * @sgt: scatter-gather table describing the mapping
 * @chunks: number of DMA-mapped chunks — TODO confirm exact meaning
 * @dev: device that the buffer object is mapped for
 * @phys: DMA address of the mapping
 * @size: size of the mapping, in bytes
 * @cache: cache that this mapping belongs to, or NULL
 * @entry: list node in the cache's list of mappings (&host1x_bo_cache.mappings)
 */
struct host1x_bo_mapping {
	struct kref ref;
	struct dma_buf_attachment *attach;
	enum dma_data_direction direction;
	struct list_head list;
	struct host1x_bo *bo;
	struct sg_table *sgt;
	unsigned int chunks;
	struct device *dev;
	dma_addr_t phys;
	size_t size;

	struct host1x_bo_cache *cache;
	struct list_head entry;
};
129 
/* Convert an embedded &struct kref back to its containing mapping. */
static inline struct host1x_bo_mapping *to_host1x_bo_mapping(struct kref *ref)
{
	return container_of(ref, struct host1x_bo_mapping, ref);
}
134 
/**
 * struct host1x_bo_ops - buffer object operations
 * @get: take a reference on the buffer object, returning it
 * @put: release a reference on the buffer object
 * @pin: create a mapping of the buffer object for @dev in direction @dir
 * @unpin: undo a mapping previously created by @pin
 * @mmap: map the buffer object for CPU access and return the address —
 *        NOTE(review): exact mapping semantics are defined by each
 *        implementation; confirm before use
 * @munmap: undo a mapping previously created by @mmap
 */
struct host1x_bo_ops {
	struct host1x_bo *(*get)(struct host1x_bo *bo);
	void (*put)(struct host1x_bo *bo);
	struct host1x_bo_mapping *(*pin)(struct device *dev, struct host1x_bo *bo,
					 enum dma_data_direction dir);
	void (*unpin)(struct host1x_bo_mapping *map);
	void *(*mmap)(struct host1x_bo *bo);
	void (*munmap)(struct host1x_bo *bo, void *addr);
};
144 
/**
 * struct host1x_bo - host1x buffer object
 * @ops: buffer object operations
 * @mappings: list of device mappings of this buffer object
 * @lock: serializes access to @mappings — NOTE(review): by analogy with
 *        &host1x_bo_cache.lock; confirm in the host1x core
 */
struct host1x_bo {
	const struct host1x_bo_ops *ops;
	struct list_head mappings;
	spinlock_t lock;
};
150 
151 static inline void host1x_bo_init(struct host1x_bo *bo,
152 				  const struct host1x_bo_ops *ops)
153 {
154 	INIT_LIST_HEAD(&bo->mappings);
155 	spin_lock_init(&bo->lock);
156 	bo->ops = ops;
157 }
158 
/* Take a reference on @bo through its ->get() operation and return it. */
static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
{
	return bo->ops->get(bo);
}
163 
/* Release a reference on @bo through its ->put() operation. */
static inline void host1x_bo_put(struct host1x_bo *bo)
{
	bo->ops->put(bo);
}
168 
169 struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
170 					enum dma_data_direction dir,
171 					struct host1x_bo_cache *cache);
172 void host1x_bo_unpin(struct host1x_bo_mapping *map);
173 
/*
 * Map @bo via its ->mmap() operation and return the address. The exact
 * mapping semantics are defined by the implementation — see &host1x_bo_ops.
 */
static inline void *host1x_bo_mmap(struct host1x_bo *bo)
{
	return bo->ops->mmap(bo);
}
178 
/* Undo a mapping previously obtained from host1x_bo_mmap(). */
static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
{
	bo->ops->munmap(bo, addr);
}
183 
184 /*
185  * host1x syncpoints
186  */
187 
188 #define HOST1X_SYNCPT_CLIENT_MANAGED	(1 << 0)
189 #define HOST1X_SYNCPT_HAS_BASE		(1 << 1)
190 
191 struct host1x_syncpt_base;
192 struct host1x_syncpt;
193 struct host1x;
194 
195 struct host1x_syncpt *host1x_syncpt_get_by_id(struct host1x *host, u32 id);
196 struct host1x_syncpt *host1x_syncpt_get_by_id_noref(struct host1x *host, u32 id);
197 struct host1x_syncpt *host1x_syncpt_get(struct host1x_syncpt *sp);
198 u32 host1x_syncpt_id(struct host1x_syncpt *sp);
199 u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
200 u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
201 u32 host1x_syncpt_read(struct host1x_syncpt *sp);
202 int host1x_syncpt_incr(struct host1x_syncpt *sp);
203 u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
204 int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
205 		       u32 *value);
206 struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
207 					    unsigned long flags);
208 void host1x_syncpt_put(struct host1x_syncpt *sp);
209 struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
210 					  unsigned long flags,
211 					  const char *name);
212 
213 struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
214 u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
215 
216 void host1x_syncpt_release_vblank_reservation(struct host1x_client *client,
217 					      u32 syncpt_id);
218 
219 struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold);
220 
221 /*
222  * host1x channel
223  */
224 
225 struct host1x_channel;
226 struct host1x_job;
227 
228 struct host1x_channel *host1x_channel_request(struct host1x_client *client);
229 struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
230 void host1x_channel_stop(struct host1x_channel *channel);
231 void host1x_channel_put(struct host1x_channel *channel);
232 int host1x_job_submit(struct host1x_job *job);
233 
234 /*
235  * host1x job
236  */
237 
238 #define HOST1X_RELOC_READ	(1 << 0)
239 #define HOST1X_RELOC_WRITE	(1 << 1)
240 
/*
 * struct host1x_reloc - buffer address relocation
 *
 * Describes a patch to apply to a command buffer: the DMA address of
 * @target.bo (plus @target.offset), shifted by @shift, is written into
 * @cmdbuf.bo at @cmdbuf.offset — NOTE(review): the exact patching
 * semantics live in the job pinning code; confirm there.
 */
struct host1x_reloc {
	struct {
		struct host1x_bo *bo;	/* command buffer to patch */
		unsigned long offset;	/* location of the word to patch */
	} cmdbuf;
	struct {
		struct host1x_bo *bo;	/* buffer whose address is patched in */
		unsigned long offset;	/* offset added to the target address */
	} target;
	unsigned long shift;		/* shift applied to the address */
	unsigned long flags;		/* HOST1X_RELOC_READ / HOST1X_RELOC_WRITE */
};
253 
struct host1x_job {
	/* When refcount goes to zero, job can be freed */
	struct kref ref;

	/* List entry */
	struct list_head list;

	/* Channel where job is submitted to */
	struct host1x_channel *channel;

	/* Client where the job originated */
	struct host1x_client *client;

	/* Gathers and their memory */
	struct host1x_job_cmd *cmds;
	unsigned int num_cmds;

	/* Array of handles to be pinned & unpinned */
	struct host1x_reloc *relocs;
	unsigned int num_relocs;
	struct host1x_job_unpin_data *unpins;
	unsigned int num_unpins;

	/*
	 * DMA addresses of the pinned buffers — NOTE(review): presumably
	 * gather_addr_phys/reloc_addr_phys point into addr_phys; confirm
	 * in the job pinning code.
	 */
	dma_addr_t *addr_phys;
	dma_addr_t *gather_addr_phys;
	dma_addr_t *reloc_addr_phys;

	/* Sync point id, number of increments and end related to the submit */
	struct host1x_syncpt *syncpt;
	u32 syncpt_incrs;
	u32 syncpt_end;

	/* Completion waiter ref */
	void *waiter;

	/* Maximum time to wait for this job */
	unsigned int timeout;

	/* Job has timed out and should be released */
	bool cancelled;

	/* Index and number of slots used in the push buffer */
	unsigned int first_get;
	unsigned int num_slots;

	/* Copy of gathers */
	size_t gather_copy_size;
	dma_addr_t gather_copy;
	u8 *gather_copy_mapped;

	/* Check if register is marked as an address reg */
	int (*is_addr_reg)(struct device *dev, u32 class, u32 reg);

	/* Check if class belongs to the unit */
	int (*is_valid_class)(u32 class);

	/* Request a SETCLASS to this class */
	u32 class;

	/* Add a channel wait for previous ops to complete */
	bool serialize;

	/* Fast-forward syncpoint increments on job timeout */
	bool syncpt_recovery;

	/* Callback called when job is freed */
	void (*release)(struct host1x_job *job);
	void *user_data;

	/* Whether the host1x-side firewall should be run for this job or not */
	bool enable_firewall;
};
326 
327 struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
328 				    u32 num_cmdbufs, u32 num_relocs,
329 				    bool skip_firewall);
330 void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
331 			   unsigned int words, unsigned int offset);
332 void host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh,
333 			 bool relative, u32 next_class);
334 struct host1x_job *host1x_job_get(struct host1x_job *job);
335 void host1x_job_put(struct host1x_job *job);
336 int host1x_job_pin(struct host1x_job *job, struct device *dev);
337 void host1x_job_unpin(struct host1x_job *job);
338 
339 /*
340  * subdevice probe infrastructure
341  */
342 
343 struct host1x_device;
344 
/**
 * struct host1x_driver - host1x logical device driver
 * @driver: core driver (embedded &struct device_driver)
 * @subdevs: table of OF device IDs matching subdevices for this driver
 * @list: list node for the driver
 * @probe: called when the host1x logical device is probed
 * @remove: called when the host1x logical device is removed
 * @shutdown: called when the host1x logical device is shut down
 */
struct host1x_driver {
	struct device_driver driver;

	const struct of_device_id *subdevs;
	struct list_head list;

	int (*probe)(struct host1x_device *device);
	int (*remove)(struct host1x_device *device);
	void (*shutdown)(struct host1x_device *device);
};
364 
/* Convert an embedded &struct device_driver back to its host1x_driver. */
static inline struct host1x_driver *
to_host1x_driver(struct device_driver *driver)
{
	return container_of(driver, struct host1x_driver, driver);
}
370 
int host1x_driver_register_full(struct host1x_driver *driver,
				struct module *owner);
void host1x_driver_unregister(struct host1x_driver *driver);

/* Register @driver with the calling module as its owner. */
#define host1x_driver_register(driver) \
	host1x_driver_register_full(driver, THIS_MODULE)
377 
/**
 * struct host1x_device - host1x logical device
 * @driver: host1x driver bound to this device
 * @list: list node for the device
 * @dev: device structure embedded in the logical device
 * @subdevs_lock: protects @subdevs and @active — TODO confirm both
 *                lists are covered by this lock
 * @subdevs: list of subdevices
 * @active: list of active subdevices — NOTE(review): presumably those
 *          whose drivers have bound; verify in the bus code
 * @clients_lock: protects @clients
 * @clients: list of host1x clients attached to this device
 * @registered: whether the device has been registered with the driver core
 * @dma_parms: DMA parameters for use by the DMA API
 */
struct host1x_device {
	struct host1x_driver *driver;
	struct list_head list;
	struct device dev;

	struct mutex subdevs_lock;
	struct list_head subdevs;
	struct list_head active;

	struct mutex clients_lock;
	struct list_head clients;

	bool registered;

	struct device_dma_parameters dma_parms;
};
394 
/* Convert an embedded &struct device back to its host1x_device. */
static inline struct host1x_device *to_host1x_device(struct device *dev)
{
	return container_of(dev, struct host1x_device, dev);
}
399 
400 int host1x_device_init(struct host1x_device *device);
401 int host1x_device_exit(struct host1x_device *device);
402 
403 void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
404 void host1x_client_exit(struct host1x_client *client);
405 
/*
 * Initialize @client with a unique, per-call-site lock class key so
 * that lockdep can distinguish the client's mutex from other instances.
 */
#define host1x_client_init(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
	})
411 
412 int __host1x_client_register(struct host1x_client *client);
413 
/*
 * Note that this wrapper calls __host1x_client_init() for compatibility
 * with existing callers. Callers that want to separately initialize and
 * register a host1x client must first initialize it using either
 * __host1x_client_init() or host1x_client_init(), and then use the
 * low-level __host1x_client_register() function to avoid the client
 * getting reinitialized.
 */
#define host1x_client_register(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
		__host1x_client_register(client);	\
	})
428 
429 int host1x_client_unregister(struct host1x_client *client);
430 
431 int host1x_client_suspend(struct host1x_client *client);
432 int host1x_client_resume(struct host1x_client *client);
433 
434 struct tegra_mipi_device;
435 
436 struct tegra_mipi_device *tegra_mipi_request(struct device *device,
437 					     struct device_node *np);
438 void tegra_mipi_free(struct tegra_mipi_device *device);
439 int tegra_mipi_enable(struct tegra_mipi_device *device);
440 int tegra_mipi_disable(struct tegra_mipi_device *device);
441 int tegra_mipi_start_calibration(struct tegra_mipi_device *device);
442 int tegra_mipi_finish_calibration(struct tegra_mipi_device *device);
443 
444 #endif
445