1 /*
2  * Copyright 2008 Advanced Micro Devices, Inc.
3  * Copyright 2008 Red Hat Inc.
4  * Copyright 2009 Jerome Glisse.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Dave Airlie
25  *          Alex Deucher
26  *          Jerome Glisse
27  *          Christian König
28  */
29 #include <linux/seq_file.h>
30 #include <linux/slab.h>
31 #include <linux/debugfs.h>
32 #include <drm/drmP.h>
33 #include <drm/amdgpu_drm.h>
34 #include "amdgpu.h"
35 #include "atom.h"
36 
37 /*
38  * Rings
39  * Most engines on the GPU are fed via ring buffers.  Ring
40  * buffers are areas of GPU accessible memory that the host
41  * writes commands into and the GPU reads commands out of.
42  * There is a rptr (read pointer) that determines where the
43  * GPU is currently reading, and a wptr (write pointer)
44  * which determines where the host has written.  When the
45  * pointers are equal, the ring is idle.  When the host
46  * writes commands to the ring buffer, it increments the
47  * wptr.  The GPU then starts fetching commands and executes
48  * them until the pointers are equal again.
49  */
50 static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
51 				    struct amdgpu_ring *ring);
52 static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
53 
54 /**
55  * amdgpu_ring_alloc - allocate space on the ring buffer
56  *
57  * @adev: amdgpu_device pointer
58  * @ring: amdgpu_ring structure holding ring information
59  * @ndw: number of dwords to allocate in the ring buffer
60  *
61  * Allocate @ndw dwords in the ring buffer (all asics).
62  * Returns 0 on success, error on failure.
63  */
64 int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw)
65 {
66 	/* Align requested size with padding so unlock_commit can
67 	 * pad safely */
68 	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
69 
70 	/* Make sure we aren't trying to allocate more space
71 	 * than the maximum for one submission
72 	 */
73 	if (WARN_ON_ONCE(ndw > ring->max_dw))
74 		return -ENOMEM;
75 
76 	ring->count_dw = ndw;
77 	ring->wptr_old = ring->wptr;
78 	return 0;
79 }
80 
81 /** amdgpu_ring_insert_nop - insert NOP packets
82  *
83  * @ring: amdgpu_ring structure holding ring information
84  * @count: the number of NOP packets to insert
85  *
86  * This is the generic insert_nop function for rings except SDMA
87  */
88 void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
89 {
90 	int i;
91 
92 	for (i = 0; i < count; i++)
93 		amdgpu_ring_write(ring, ring->nop);
94 }
95 
96 /** amdgpu_ring_generic_pad_ib - pad IB with NOP packets
97  *
98  * @ring: amdgpu_ring structure holding ring information
99  * @ib: IB to add NOP packets to
100  *
101  * This is the generic pad_ib function for rings except SDMA
102  */
103 void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
104 {
105 	while (ib->length_dw & ring->align_mask)
106 		ib->ptr[ib->length_dw++] = ring->nop;
107 }
108 
109 /**
110  * amdgpu_ring_commit - tell the GPU to execute the new
111  * commands on the ring buffer
112  *
113  * @adev: amdgpu_device pointer
114  * @ring: amdgpu_ring structure holding ring information
115  *
116  * Update the wptr (write pointer) to tell the GPU to
117  * execute new commands on the ring buffer (all asics).
118  */
119 void amdgpu_ring_commit(struct amdgpu_ring *ring)
120 {
121 	uint32_t count;
122 
123 	/* We pad to match fetch size */
124 	count = ring->align_mask + 1 - (ring->wptr & ring->align_mask);
125 	count %= ring->align_mask + 1;
126 	ring->funcs->insert_nop(ring, count);
127 
128 	mb();
129 	amdgpu_ring_set_wptr(ring);
130 }
131 
132 /**
133  * amdgpu_ring_undo - reset the wptr
134  *
135  * @ring: amdgpu_ring structure holding ring information
136  *
137  * Reset the driver's copy of the wptr (all asics).
138  */
139 void amdgpu_ring_undo(struct amdgpu_ring *ring)
140 {
141 	ring->wptr = ring->wptr_old;
142 }
143 
144 /**
145  * amdgpu_ring_backup - Back up the content of a ring
146  *
147  * @ring: the ring we want to back up
148  *
149  * Saves all unprocessed commits from a ring, returns the number of dwords saved.
150  */
151 unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
152 			    uint32_t **data)
153 {
154 	unsigned size, ptr, i;
155 
156 	*data = NULL;
157 
158 	if (ring->ring_obj == NULL)
159 		return 0;
160 
161 	/* it doesn't make sense to save anything if all fences are signaled */
162 	if (!amdgpu_fence_count_emitted(ring))
163 		return 0;
164 
165 	ptr = le32_to_cpu(*ring->next_rptr_cpu_addr);
166 
167 	size = ring->wptr + (ring->ring_size / 4);
168 	size -= ptr;
169 	size &= ring->ptr_mask;
170 	if (size == 0)
171 		return 0;
172 
173 	/* and then save the content of the ring */
174 	*data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
175 	if (!*data)
176 		return 0;
177 	for (i = 0; i < size; ++i) {
178 		(*data)[i] = ring->ring[ptr++];
179 		ptr &= ring->ptr_mask;
180 	}
181 
182 	return size;
183 }
184 
185 /**
186  * amdgpu_ring_restore - append saved commands to the ring again
187  *
188  * @ring: ring to append commands to
189  * @size: number of dwords we want to write
190  * @data: saved commands
191  *
192  * Allocates space on the ring and restore the previously saved commands.
193  */
194 int amdgpu_ring_restore(struct amdgpu_ring *ring,
195 			unsigned size, uint32_t *data)
196 {
197 	int i, r;
198 
199 	if (!size || !data)
200 		return 0;
201 
202 	/* restore the saved ring content */
203 	r = amdgpu_ring_alloc(ring, size);
204 	if (r)
205 		return r;
206 
207 	for (i = 0; i < size; ++i) {
208 		amdgpu_ring_write(ring, data[i]);
209 	}
210 
211 	amdgpu_ring_commit(ring);
212 	kfree(data);
213 	return 0;
214 }
215 
216 /**
217  * amdgpu_ring_init - init driver ring struct.
218  *
219  * @adev: amdgpu_device pointer
220  * @ring: amdgpu_ring structure holding ring information
221  * @max_ndw: maximum number of dw for ring alloc
222  * @nop: nop packet for this ring
223  *
224  * Initialize the driver information for the selected ring (all asics).
225  * Returns 0 on success, error on failure.
226  */
227 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
228 		     unsigned max_dw, u32 nop, u32 align_mask,
229 		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
230 		     enum amdgpu_ring_type ring_type)
231 {
232 	int r;
233 
234 	if (ring->adev == NULL) {
235 		if (adev->num_rings >= AMDGPU_MAX_RINGS)
236 			return -EINVAL;
237 
238 		ring->adev = adev;
239 		ring->idx = adev->num_rings++;
240 		adev->rings[ring->idx] = ring;
241 		r = amdgpu_fence_driver_init_ring(ring,
242 			amdgpu_sched_hw_submission);
243 		if (r)
244 			return r;
245 	}
246 
247 	r = amdgpu_wb_get(adev, &ring->rptr_offs);
248 	if (r) {
249 		dev_err(adev->dev, "(%d) ring rptr_offs wb alloc failed\n", r);
250 		return r;
251 	}
252 
253 	r = amdgpu_wb_get(adev, &ring->wptr_offs);
254 	if (r) {
255 		dev_err(adev->dev, "(%d) ring wptr_offs wb alloc failed\n", r);
256 		return r;
257 	}
258 
259 	r = amdgpu_wb_get(adev, &ring->fence_offs);
260 	if (r) {
261 		dev_err(adev->dev, "(%d) ring fence_offs wb alloc failed\n", r);
262 		return r;
263 	}
264 
265 	r = amdgpu_wb_get(adev, &ring->next_rptr_offs);
266 	if (r) {
267 		dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r);
268 		return r;
269 	}
270 	ring->next_rptr_gpu_addr = adev->wb.gpu_addr + ring->next_rptr_offs * 4;
271 	ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs];
272 
273 	r = amdgpu_wb_get(adev, &ring->cond_exe_offs);
274 	if (r) {
275 		dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r);
276 		return r;
277 	}
278 	ring->cond_exe_gpu_addr = adev->wb.gpu_addr + (ring->cond_exe_offs * 4);
279 	ring->cond_exe_cpu_addr = &adev->wb.wb[ring->cond_exe_offs];
280 
281 	spin_lock_init(&ring->fence_lock);
282 	r = amdgpu_fence_driver_start_ring(ring, irq_src, irq_type);
283 	if (r) {
284 		dev_err(adev->dev, "failed initializing fences (%d).\n", r);
285 		return r;
286 	}
287 
288 	ring->ring_size = roundup_pow_of_two(max_dw * 4 *
289 					     amdgpu_sched_hw_submission);
290 	ring->align_mask = align_mask;
291 	ring->nop = nop;
292 	ring->type = ring_type;
293 
294 	/* Allocate ring buffer */
295 	if (ring->ring_obj == NULL) {
296 		r = amdgpu_bo_create(adev, ring->ring_size, PAGE_SIZE, true,
297 				     AMDGPU_GEM_DOMAIN_GTT, 0,
298 				     NULL, NULL, &ring->ring_obj);
299 		if (r) {
300 			dev_err(adev->dev, "(%d) ring create failed\n", r);
301 			return r;
302 		}
303 		r = amdgpu_bo_reserve(ring->ring_obj, false);
304 		if (unlikely(r != 0))
305 			return r;
306 		r = amdgpu_bo_pin(ring->ring_obj, AMDGPU_GEM_DOMAIN_GTT,
307 					&ring->gpu_addr);
308 		if (r) {
309 			amdgpu_bo_unreserve(ring->ring_obj);
310 			dev_err(adev->dev, "(%d) ring pin failed\n", r);
311 			return r;
312 		}
313 		r = amdgpu_bo_kmap(ring->ring_obj,
314 				       (void **)&ring->ring);
315 
316 		memset((void *)ring->ring, 0, ring->ring_size);
317 
318 		amdgpu_bo_unreserve(ring->ring_obj);
319 		if (r) {
320 			dev_err(adev->dev, "(%d) ring map failed\n", r);
321 			return r;
322 		}
323 	}
324 	ring->ptr_mask = (ring->ring_size / 4) - 1;
325 	ring->max_dw = max_dw;
326 
327 	if (amdgpu_debugfs_ring_init(adev, ring)) {
328 		DRM_ERROR("Failed to register debugfs file for rings !\n");
329 	}
330 	return 0;
331 }
332 
333 /**
334  * amdgpu_ring_fini - tear down the driver ring struct.
335  *
336  * @adev: amdgpu_device pointer
337  * @ring: amdgpu_ring structure holding ring information
338  *
339  * Tear down the driver information for the selected ring (all asics).
340  */
341 void amdgpu_ring_fini(struct amdgpu_ring *ring)
342 {
343 	int r;
344 	struct amdgpu_bo *ring_obj;
345 
346 	ring_obj = ring->ring_obj;
347 	ring->ready = false;
348 	ring->ring = NULL;
349 	ring->ring_obj = NULL;
350 
351 	amdgpu_wb_free(ring->adev, ring->cond_exe_offs);
352 	amdgpu_wb_free(ring->adev, ring->fence_offs);
353 	amdgpu_wb_free(ring->adev, ring->rptr_offs);
354 	amdgpu_wb_free(ring->adev, ring->wptr_offs);
355 	amdgpu_wb_free(ring->adev, ring->next_rptr_offs);
356 
357 	if (ring_obj) {
358 		r = amdgpu_bo_reserve(ring_obj, false);
359 		if (likely(r == 0)) {
360 			amdgpu_bo_kunmap(ring_obj);
361 			amdgpu_bo_unpin(ring_obj);
362 			amdgpu_bo_unreserve(ring_obj);
363 		}
364 		amdgpu_bo_unref(&ring_obj);
365 	}
366 	amdgpu_debugfs_ring_fini(ring);
367 }
368 
369 /*
370  * Debugfs info
371  */
372 #if defined(CONFIG_DEBUG_FS)
373 
/* Layout of file is 12 bytes consisting of
 * - rptr
 * - wptr
 * - driver's copy of wptr
 *
 * followed by n-words of ring data
 */
static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf,
					size_t size, loff_t *pos)
{
	struct amdgpu_ring *ring = (struct amdgpu_ring*)f->f_inode->i_private;
	int r, i;
	uint32_t value, result, early[3];

	/* only whole-dword, dword-aligned reads are supported */
	if (*pos & 3 || size & 3)
		return -EINVAL;

	result = 0;

	/* first 12 bytes: sample the three pointer words once up front so
	 * all three come from (roughly) the same moment in time */
	if (*pos < 12) {
		early[0] = amdgpu_ring_get_rptr(ring);
		early[1] = amdgpu_ring_get_wptr(ring);
		early[2] = ring->wptr;
		/* resume mid-header if *pos already points past word 0 */
		for (i = *pos / 4; i < 3 && size; i++) {
			r = put_user(early[i], (uint32_t *)buf);
			if (r)
				return r;
			buf += 4;
			result += 4;
			size -= 4;
			*pos += 4;
		}
	}

	/* remainder of the file: raw ring contents, one dword at a time;
	 * file offsets are shifted by the 12-byte header */
	while (size) {
		if (*pos >= (ring->ring_size + 12))
			return result;

		value = ring->ring[(*pos - 12)/4];
		r = put_user(value, (uint32_t*)buf);
		if (r)
			return r;
		buf += 4;
		result += 4;
		size -= 4;
		*pos += 4;
	}

	return result;
}
424 
/* read-only debugfs file exposing ring pointers and contents */
static const struct file_operations amdgpu_debugfs_ring_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_debugfs_ring_read,
	.llseek = default_llseek
};
430 
431 #endif
432 
/**
 * amdgpu_debugfs_ring_init - create the per-ring debugfs file
 *
 * @adev: amdgpu_device pointer
 * @ring: ring to expose; ring->ent is set on success
 *
 * Creates "amdgpu_ring_<name>" under the DRM primary minor's debugfs root.
 * Returns 0 on success (also when debugfs is compiled out), error on failure.
 */
static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
				    struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
	char name[32];

	/* snprintf: ring->name length is not bounded here, so an
	 * unchecked sprintf could overflow the 32-byte buffer */
	snprintf(name, sizeof(name), "amdgpu_ring_%s", ring->name);

	ent = debugfs_create_file(name,
				  S_IFREG | S_IRUGO, root,
				  ring, &amdgpu_debugfs_ring_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	/* file size = 12-byte pointer header + raw ring contents */
	i_size_write(ent->d_inode, ring->ring_size + 12);
	ring->ent = ent;
#endif
	return 0;
}
454 
/* Remove the per-ring debugfs file created by amdgpu_debugfs_ring_init(). */
static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
	debugfs_remove(ring->ent);
#endif
}
461