1 /*
2  * Copyright © 2008-2010 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <[email protected]>
25  *    Zou Nan hai <[email protected]>
 *    Xiang Hai hao <[email protected]>
27  *
28  */
29 
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 
33 #include <dev/drm2/drmP.h>
34 #include <dev/drm2/drm.h>
35 #include <dev/drm2/i915/i915_drm.h>
36 #include <dev/drm2/i915/i915_drv.h>
37 #include <dev/drm2/i915/intel_drv.h>
38 #include <dev/drm2/i915/intel_ringbuffer.h>
39 #include <sys/sched.h>
40 #include <sys/sf_buf.h>
41 
42 /*
43  * 965+ support PIPE_CONTROL commands, which provide finer grained control
44  * over cache flushing.
45  */
46 struct pipe_control {
47 	struct drm_i915_gem_object *obj;
48 	volatile u32 *cpu_page;
49 	u32 gtt_offset;
50 };
51 
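/*
 * Arm a user-interrupt reference for tracing: if no trace IRQ is pending
 * yet, grab the ring's IRQ under the irq_lock and remember the seqno whose
 * completion (noticed from the interrupt path) releases it again.
 */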
52 void
53 i915_trace_irq_get(struct intel_ring_buffer *ring, uint32_t seqno)
54 {
55 	struct drm_i915_private *dev_priv;
56 
57 	if (ring->trace_irq_seqno == 0) {
58 		dev_priv = ring->dev->dev_private;
59 		mtx_lock(&dev_priv->irq_lock);
60 		if (ring->irq_get(ring))
61 			ring->trace_irq_seqno = seqno;
62 		mtx_unlock(&dev_priv->irq_lock);
63 	}
64 }
65 
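/*
 * Free space between the hardware HEAD and our software TAIL, modulo the
 * ring size.  The extra 8 bytes keep TAIL from ever catching up to HEAD,
 * so an empty ring is never mistaken for a full one.
 */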
66 static inline int ring_space(struct intel_ring_buffer *ring)
67 {
68 	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
69 	if (space < 0)
70 		space += ring->size;
71 	return space;
72 }
73 
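/*
 * Flush for pre-gen4 render rings: a single MI_FLUSH, suppressing the
 * render cache write-back unless the render domain is involved, and adding
 * MI_READ_FLUSH when the sampler cache must be invalidated.
 */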
74 static int
75 gen2_render_ring_flush(struct intel_ring_buffer *ring,
76 		       u32	invalidate_domains,
77 		       u32	flush_domains)
78 {
79 	u32 cmd;
80 	int ret;
81 
82 	cmd = MI_FLUSH;
83 	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
84 		cmd |= MI_NO_WRITE_FLUSH;
85 
86 	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
87 		cmd |= MI_READ_FLUSH;
88 
89 	ret = intel_ring_begin(ring, 2);
90 	if (ret)
91 		return ret;
92 
93 	intel_ring_emit(ring, cmd);
94 	intel_ring_emit(ring, MI_NOOP);
95 	intel_ring_advance(ring);
96 
97 	return 0;
98 }
99 
100 static int
101 gen4_render_ring_flush(struct intel_ring_buffer *ring,
102 		       u32	invalidate_domains,
103 		       u32	flush_domains)
104 {
105 	struct drm_device *dev = ring->dev;
106 	u32 cmd;
107 	int ret;
108 
109 	/*
110 	 * read/write caches:
111 	 *
112 	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
113 	 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
114 	 * also flushed at 2d versus 3d pipeline switches.
115 	 *
116 	 * read-only caches:
117 	 *
118 	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
119 	 * MI_READ_FLUSH is set, and is always flushed on 965.
120 	 *
121 	 * I915_GEM_DOMAIN_COMMAND may not exist?
122 	 *
123 	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
124 	 * invalidated when MI_EXE_FLUSH is set.
125 	 *
126 	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
127 	 * invalidated with every MI_FLUSH.
128 	 *
129 	 * TLBs:
130 	 *
131 	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
133 	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
134 	 * are flushed at any MI_FLUSH.
135 	 */
136 
137 	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
138 	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
139 		cmd &= ~MI_NO_WRITE_FLUSH;
140 	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
141 		cmd |= MI_EXE_FLUSH;
142 
143 	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
144 	    (IS_G4X(dev) || IS_GEN5(dev)))
145 		cmd |= MI_INVALIDATE_ISP;
146 
147 	ret = intel_ring_begin(ring, 2);
148 	if (ret)
149 		return ret;
150 
151 	intel_ring_emit(ring, cmd);
152 	intel_ring_emit(ring, MI_NOOP);
153 	intel_ring_advance(ring);
154 
155 	return 0;
156 }
157 
158 /**
159  * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
160  * implementing two workarounds on gen6.  From section 1.4.7.1
161  * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
162  *
163  * [DevSNB-C+{W/A}] Before any depth stall flush (including those
164  * produced by non-pipelined state commands), software needs to first
165  * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
166  * 0.
167  *
168  * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
169  * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
170  *
171  * And the workaround for these two requires this workaround first:
172  *
173  * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
174  * BEFORE the pipe-control with a post-sync op and no write-cache
175  * flushes.
176  *
177  * And this last workaround is tricky because of the requirements on
178  * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
179  * volume 2 part 1:
180  *
181  *     "1 of the following must also be set:
182  *      - Render Target Cache Flush Enable ([12] of DW1)
183  *      - Depth Cache Flush Enable ([0] of DW1)
184  *      - Stall at Pixel Scoreboard ([1] of DW1)
185  *      - Depth Stall ([13] of DW1)
186  *      - Post-Sync Operation ([13] of DW1)
187  *      - Notify Enable ([8] of DW1)"
188  *
189  * The cache flushes require the workaround flush that triggered this
190  * one, so we can't use it.  Depth stall would trigger the same.
191  * Post-sync nonzero is what triggered this second workaround, so we
192  * can't use that one either.  Notify enable is IRQs, which aren't
193  * really our business.  That leaves only stall at scoreboard.
194  */
195 static int
196 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
197 {
198 	struct pipe_control *pc = ring->private;
199 	u32 scratch_addr = pc->gtt_offset + 128;
200 	int ret;
201 
202 
203 	ret = intel_ring_begin(ring, 6);
204 	if (ret)
205 		return ret;
206 
207 	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
208 	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
209 			PIPE_CONTROL_STALL_AT_SCOREBOARD);
210 	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
211 	intel_ring_emit(ring, 0); /* low dword */
212 	intel_ring_emit(ring, 0); /* high dword */
213 	intel_ring_emit(ring, MI_NOOP);
214 	intel_ring_advance(ring);
215 
216 	ret = intel_ring_begin(ring, 6);
217 	if (ret)
218 		return ret;
219 
220 	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
221 	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
222 	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
223 	intel_ring_emit(ring, 0);
224 	intel_ring_emit(ring, 0);
225 	intel_ring_emit(ring, MI_NOOP);
226 	intel_ring_advance(ring);
227 
228 	return 0;
229 }
230 
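/*
 * Gen6 render flush: apply the post-sync-nonzero workaround above, then
 * emit a single PIPE_CONTROL that flushes and invalidates every cache
 * (see the comment below on why the bits are not trimmed per domain).
 */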
231 static int
232 gen6_render_ring_flush(struct intel_ring_buffer *ring,
233                          u32 invalidate_domains, u32 flush_domains)
234 {
235 	u32 flags = 0;
236 	struct pipe_control *pc = ring->private;
237 	u32 scratch_addr = pc->gtt_offset + 128;
238 	int ret;
239 
	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(ring);
	if (ret)
		return ret;
242 
243 	/* Just flush everything.  Experiments have shown that reducing the
244 	 * number of bits based on the write domains has little performance
245 	 * impact.
246 	 */
247 	flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
248 	flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
249 	flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
250 	flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
251 	flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
252 	flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
253 	flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
254 
255 	ret = intel_ring_begin(ring, 6);
256 	if (ret)
257 		return ret;
258 
259 	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
260 	intel_ring_emit(ring, flags);
261 	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
262 	intel_ring_emit(ring, 0); /* lower dword */
	intel_ring_emit(ring, 0); /* upper dword */
264 	intel_ring_emit(ring, MI_NOOP);
265 	intel_ring_advance(ring);
266 
267 	return 0;
268 }
269 
270 static void ring_write_tail(struct intel_ring_buffer *ring,
271 			    u32 value)
272 {
273 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
274 	I915_WRITE_TAIL(ring, value);
275 }
276 
277 u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
278 {
279 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
280 	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
281 			RING_ACTHD(ring->mmio_base) : ACTHD;
282 
283 	return I915_READ(acthd_reg);
284 }
285 
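/*
 * Common ring bring-up: stop the ring, program its start address, make
 * sure HEAD really resets to zero (G45 needs a second attempt), then
 * enable it with RING_VALID and wait up to 50ms for a sane state.
 */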
286 static int init_ring_common(struct intel_ring_buffer *ring)
287 {
288 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
289 	struct drm_i915_gem_object *obj = ring->obj;
290 	u32 head;
291 
292 	/* Stop the ring if it's running. */
293 	I915_WRITE_CTL(ring, 0);
294 	I915_WRITE_HEAD(ring, 0);
295 	ring->write_tail(ring, 0);
296 
297 	/* Initialize the ring. */
298 	I915_WRITE_START(ring, obj->gtt_offset);
299 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
300 
301 	/* G45 ring initialization fails to reset head to zero */
302 	if (head != 0) {
303 		DRM_DEBUG_KMS("%s head not reset to zero "
304 			      "ctl %08x head %08x tail %08x start %08x\n",
305 			      ring->name,
306 			      I915_READ_CTL(ring),
307 			      I915_READ_HEAD(ring),
308 			      I915_READ_TAIL(ring),
309 			      I915_READ_START(ring));
310 
311 		I915_WRITE_HEAD(ring, 0);
312 
313 		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
314 			DRM_ERROR("failed to set %s head to zero "
315 				  "ctl %08x head %08x tail %08x start %08x\n",
316 				  ring->name,
317 				  I915_READ_CTL(ring),
318 				  I915_READ_HEAD(ring),
319 				  I915_READ_TAIL(ring),
320 				  I915_READ_START(ring));
321 		}
322 	}
323 
324 	I915_WRITE_CTL(ring,
325 			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
326 			| RING_VALID);
327 
328 	/* If the head is still not zero, the ring is dead */
329 	if (_intel_wait_for(ring->dev,
330 	    (I915_READ_CTL(ring) & RING_VALID) != 0 &&
331 	     I915_READ_START(ring) == obj->gtt_offset &&
332 	     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0,
333 	    50, 1, "915rii")) {
334 		DRM_ERROR("%s initialization failed "
335 				"ctl %08x head %08x tail %08x start %08x\n",
336 				ring->name,
337 				I915_READ_CTL(ring),
338 				I915_READ_HEAD(ring),
339 				I915_READ_TAIL(ring),
340 				I915_READ_START(ring));
341 		return -EIO;
342 	}
343 
344 	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
345 		i915_kernel_lost_context(ring->dev);
346 	else {
347 		ring->head = I915_READ_HEAD(ring);
348 		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
349 		ring->space = ring_space(ring);
350 	}
351 
352 	return 0;
353 }
354 
355 static int
356 init_pipe_control(struct intel_ring_buffer *ring)
357 {
358 	struct pipe_control *pc;
359 	struct drm_i915_gem_object *obj;
360 	int ret;
361 
362 	if (ring->private)
363 		return 0;
364 
365 	pc = malloc(sizeof(*pc), DRM_I915_GEM, M_WAITOK);
366 	if (!pc)
367 		return -ENOMEM;
368 
369 	obj = i915_gem_alloc_object(ring->dev, 4096);
370 	if (obj == NULL) {
371 		DRM_ERROR("Failed to allocate seqno page\n");
372 		ret = -ENOMEM;
373 		goto err;
374 	}
375 
376 	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
377 
378 	ret = i915_gem_object_pin(obj, 4096, true);
379 	if (ret)
380 		goto err_unref;
381 
382 	pc->gtt_offset = obj->gtt_offset;
	pc->cpu_page = (uint32_t *)kva_alloc(PAGE_SIZE);
	if (pc->cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}
386 	pmap_qenter((uintptr_t)pc->cpu_page, &obj->pages[0], 1);
387 	pmap_invalidate_cache_range((vm_offset_t)pc->cpu_page,
388 	    (vm_offset_t)pc->cpu_page + PAGE_SIZE, FALSE);
389 
390 	pc->obj = obj;
391 	ring->private = pc;
392 	return 0;
393 
394 err_unpin:
395 	i915_gem_object_unpin(obj);
396 err_unref:
397 	drm_gem_object_unreference(&obj->base);
398 err:
399 	free(pc, DRM_I915_GEM);
400 	return ret;
401 }
402 
403 static void
404 cleanup_pipe_control(struct intel_ring_buffer *ring)
405 {
406 	struct pipe_control *pc = ring->private;
407 	struct drm_i915_gem_object *obj;
408 
409 	if (!ring->private)
410 		return;
411 
412 	obj = pc->obj;
413 
414 	pmap_qremove((vm_offset_t)pc->cpu_page, 1);
415 	kva_free((uintptr_t)pc->cpu_page, PAGE_SIZE);
416 	i915_gem_object_unpin(obj);
417 	drm_gem_object_unreference(&obj->base);
418 
419 	free(pc, DRM_I915_GEM);
420 	ring->private = NULL;
421 }
422 
423 static int init_render_ring(struct intel_ring_buffer *ring)
424 {
425 	struct drm_device *dev = ring->dev;
426 	struct drm_i915_private *dev_priv = dev->dev_private;
427 	int ret = init_ring_common(ring);
428 
429 	if (INTEL_INFO(dev)->gen > 3) {
430 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
431 		if (IS_GEN7(dev))
432 			I915_WRITE(GFX_MODE_GEN7,
433 				   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
434 				   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
435 	}
436 
437 	if (INTEL_INFO(dev)->gen >= 5) {
438 		ret = init_pipe_control(ring);
439 		if (ret)
440 			return ret;
441 	}
442 
443 	if (IS_GEN6(dev)) {
444 		/* From the Sandybridge PRM, volume 1 part 3, page 24:
445 		 * "If this bit is set, STCunit will have LRA as replacement
446 		 *  policy. [...] This bit must be reset.  LRA replacement
447 		 *  policy is not supported."
448 		 */
449 		I915_WRITE(CACHE_MODE_0,
450 			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
451 
452 		/* This is not explicitly set for GEN6, so read the register.
453 		 * see intel_ring_mi_set_context() for why we care.
454 		 * TODO: consider explicitly setting the bit for GEN5
455 		 */
456 		ring->itlb_before_ctx_switch =
457 			!!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
458 	}
459 
460 	if (INTEL_INFO(dev)->gen >= 6)
461 		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
462 
463 	return ret;
464 }
465 
466 static void render_ring_cleanup(struct intel_ring_buffer *ring)
467 {
468 	if (!ring->private)
469 		return;
470 
471 	cleanup_pipe_control(ring);
472 }
473 
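/*
 * Emit a semaphore mailbox update: writes @seqno into the sync (mailbox)
 * register at @mmio_offset so another ring can later wait on it with
 * MI_SEMAPHORE_MBOX.
 */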
474 static void
475 update_mboxes(struct intel_ring_buffer *ring,
476 	      u32 seqno,
477 	      u32 mmio_offset)
478 {
479 	intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
480 			      MI_SEMAPHORE_GLOBAL_GTT |
481 			      MI_SEMAPHORE_REGISTER |
482 			      MI_SEMAPHORE_UPDATE);
483 	intel_ring_emit(ring, seqno);
484 	intel_ring_emit(ring, mmio_offset);
485 }
486 
487 /**
488  * gen6_add_request - Update the semaphore mailbox registers
489  *
 * @ring - ring that is adding a request
 * @seqno - seqno written into the ring, returned to the caller
492  *
493  * Update the mailbox registers in the *other* rings with the current seqno.
494  * This acts like a signal in the canonical semaphore.
495  */
496 static int
497 gen6_add_request(struct intel_ring_buffer *ring,
498 		 u32 *seqno)
499 {
500 	u32 mbox1_reg;
501 	u32 mbox2_reg;
502 	int ret;
503 
504 	ret = intel_ring_begin(ring, 10);
505 	if (ret)
506 		return ret;
507 
508 	mbox1_reg = ring->signal_mbox[0];
509 	mbox2_reg = ring->signal_mbox[1];
510 
511 	*seqno = i915_gem_next_request_seqno(ring);
512 
513 	update_mboxes(ring, *seqno, mbox1_reg);
514 	update_mboxes(ring, *seqno, mbox2_reg);
515 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
516 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
517 	intel_ring_emit(ring, *seqno);
518 	intel_ring_emit(ring, MI_USER_INTERRUPT);
519 	intel_ring_advance(ring);
520 
521 	return 0;
522 }
523 
524 /**
 * gen6_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has signalled, or will signal
 * @seqno - seqno which the waiter will block on
530  */
531 static int
532 gen6_ring_sync(struct intel_ring_buffer *waiter,
533 	       struct intel_ring_buffer *signaller,
534 	       u32 seqno)
535 {
536 	int ret;
537 	u32 dw1 = MI_SEMAPHORE_MBOX |
538 		  MI_SEMAPHORE_COMPARE |
539 		  MI_SEMAPHORE_REGISTER;
540 
541 	/* Throughout all of the GEM code, seqno passed implies our current
542 	 * seqno is >= the last seqno executed. However for hardware the
543 	 * comparison is strictly greater than.
544 	 */
545 	seqno -= 1;
546 
547 	if (signaller->semaphore_register[waiter->id] ==
548 	    MI_SEMAPHORE_SYNC_INVALID)
549 		printf("gen6_ring_sync semaphore_register %d invalid\n",
550 		    waiter->id);
551 
552 	ret = intel_ring_begin(waiter, 4);
553 	if (ret)
554 		return ret;
555 
556 	intel_ring_emit(waiter,
557 			dw1 | signaller->semaphore_register[waiter->id]);
558 	intel_ring_emit(waiter, seqno);
559 	intel_ring_emit(waiter, 0);
560 	intel_ring_emit(waiter, MI_NOOP);
561 	intel_ring_advance(waiter);
562 
563 	return 0;
564 }
565 
566 int render_ring_sync_to(struct intel_ring_buffer *waiter,
567     struct intel_ring_buffer *signaller, u32 seqno);
568 int gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
569     struct intel_ring_buffer *signaller, u32 seqno);
570 int gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
571     struct intel_ring_buffer *signaller, u32 seqno);
572 
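/*
 * Emit one 4-dword PIPE_CONTROL that stalls on depth and performs a qword
 * write to addr__; used below to scrub the PIPE_NOTIFY write buffers before
 * the final, interrupting PIPE_CONTROL in pc_render_add_request().
 */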
573 #define PIPE_CONTROL_FLUSH(ring__, addr__)					\
574 do {									\
575 	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |		\
576 		 PIPE_CONTROL_DEPTH_STALL);				\
577 	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);			\
578 	intel_ring_emit(ring__, 0);							\
579 	intel_ring_emit(ring__, 0);							\
580 } while (0)
581 
582 static int
583 pc_render_add_request(struct intel_ring_buffer *ring,
584 		      uint32_t *result)
585 {
586 	u32 seqno = i915_gem_next_request_seqno(ring);
587 	struct pipe_control *pc = ring->private;
588 	u32 scratch_addr = pc->gtt_offset + 128;
589 	int ret;
590 
591 	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
592 	 * incoherent with writes to memory, i.e. completely fubar,
593 	 * so we need to use PIPE_NOTIFY instead.
594 	 *
595 	 * However, we also need to workaround the qword write
596 	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
597 	 * memory before requesting an interrupt.
598 	 */
599 	ret = intel_ring_begin(ring, 32);
600 	if (ret)
601 		return ret;
602 
603 	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
604 			PIPE_CONTROL_WRITE_FLUSH |
605 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
606 	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
607 	intel_ring_emit(ring, seqno);
608 	intel_ring_emit(ring, 0);
609 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
610 	scratch_addr += 128; /* write to separate cachelines */
611 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
612 	scratch_addr += 128;
613 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
614 	scratch_addr += 128;
615 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
616 	scratch_addr += 128;
617 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
618 	scratch_addr += 128;
619 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
620 	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
621 			PIPE_CONTROL_WRITE_FLUSH |
622 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
623 			PIPE_CONTROL_NOTIFY);
624 	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
625 	intel_ring_emit(ring, seqno);
626 	intel_ring_emit(ring, 0);
627 	intel_ring_advance(ring);
628 
629 	*result = seqno;
630 	return 0;
631 }
632 
633 static u32
634 gen6_ring_get_seqno(struct intel_ring_buffer *ring)
635 {
636 	struct drm_device *dev = ring->dev;
637 
638 	/* Workaround to force correct ordering between irq and seqno writes on
639 	 * ivb (and maybe also on snb) by reading from a CS register (like
640 	 * ACTHD) before reading the status page. */
641 	if (/* IS_GEN6(dev) || */IS_GEN7(dev))
642 		intel_ring_get_active_head(ring);
643 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
644 }
645 
646 static u32
647 ring_get_seqno(struct intel_ring_buffer *ring)
648 {
649 	if (ring->status_page.page_addr == NULL)
650 		return (-1);
651 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
652 }
653 
654 static u32
655 pc_render_get_seqno(struct intel_ring_buffer *ring)
656 {
657 	struct pipe_control *pc = ring->private;
658 	if (pc != NULL)
659 		return pc->cpu_page[0];
660 	else
661 		return (-1);
662 }
663 
664 static bool
665 gen5_ring_get_irq(struct intel_ring_buffer *ring)
666 {
667 	struct drm_device *dev = ring->dev;
668 	drm_i915_private_t *dev_priv = dev->dev_private;
669 
670 	if (!dev->irq_enabled)
671 		return false;
672 
673 	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
674 	if (ring->irq_refcount++ == 0) {
675 		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
676 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
677 		POSTING_READ(GTIMR);
678 	}
679 
680 	return true;
681 }
682 
683 static void
684 gen5_ring_put_irq(struct intel_ring_buffer *ring)
685 {
686 	struct drm_device *dev = ring->dev;
687 	drm_i915_private_t *dev_priv = dev->dev_private;
688 
689 	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
690 	if (--ring->irq_refcount == 0) {
691 		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
692 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
693 		POSTING_READ(GTIMR);
694 	}
695 }
696 
697 static bool
698 i9xx_ring_get_irq(struct intel_ring_buffer *ring)
699 {
700 	struct drm_device *dev = ring->dev;
701 	drm_i915_private_t *dev_priv = dev->dev_private;
702 
703 	if (!dev->irq_enabled)
704 		return false;
705 
706 	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
707 	if (ring->irq_refcount++ == 0) {
708 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
709 		I915_WRITE(IMR, dev_priv->irq_mask);
710 		POSTING_READ(IMR);
711 	}
712 
713 	return true;
714 }
715 
716 static void
717 i9xx_ring_put_irq(struct intel_ring_buffer *ring)
718 {
719 	struct drm_device *dev = ring->dev;
720 	drm_i915_private_t *dev_priv = dev->dev_private;
721 
722 	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
723 	if (--ring->irq_refcount == 0) {
724 		dev_priv->irq_mask |= ring->irq_enable_mask;
725 		I915_WRITE(IMR, dev_priv->irq_mask);
726 		POSTING_READ(IMR);
727 	}
728 }
729 
730 static bool
731 i8xx_ring_get_irq(struct intel_ring_buffer *ring)
732 {
733 	struct drm_device *dev = ring->dev;
734 	drm_i915_private_t *dev_priv = dev->dev_private;
735 
736 	if (!dev->irq_enabled)
737 		return false;
738 
739 	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
740 	if (ring->irq_refcount++ == 0) {
741 		dev_priv->irq_mask &= ~ring->irq_enable_mask;
742 		I915_WRITE16(IMR, dev_priv->irq_mask);
743 		POSTING_READ16(IMR);
744 	}
745 
746 	return true;
747 }
748 
749 static void
750 i8xx_ring_put_irq(struct intel_ring_buffer *ring)
751 {
752 	struct drm_device *dev = ring->dev;
753 	drm_i915_private_t *dev_priv = dev->dev_private;
754 
755 	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
756 	if (--ring->irq_refcount == 0) {
757 		dev_priv->irq_mask |= ring->irq_enable_mask;
758 		I915_WRITE16(IMR, dev_priv->irq_mask);
759 		POSTING_READ16(IMR);
760 	}
761 }
762 
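/*
 * Program the hardware status page address register for this ring; the
 * register moved to per-ring HWS_PGA locations on gen7, lives at
 * RING_HWS_PGA_GEN6() on gen6 and at RING_HWS_PGA() before that.
 */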
763 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
764 {
765 	struct drm_device *dev = ring->dev;
766 	drm_i915_private_t *dev_priv = dev->dev_private;
767 	u32 mmio = 0;
768 
769 	/* The ring status page addresses are no longer next to the rest of
770 	 * the ring registers as of gen7.
771 	 */
772 	if (IS_GEN7(dev)) {
773 		switch (ring->id) {
774 		case RCS:
775 			mmio = RENDER_HWS_PGA_GEN7;
776 			break;
777 		case BCS:
778 			mmio = BLT_HWS_PGA_GEN7;
779 			break;
780 		case VCS:
781 			mmio = BSD_HWS_PGA_GEN7;
782 			break;
783 		}
784 	} else if (IS_GEN6(dev)) {
785 		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
786 	} else {
787 		mmio = RING_HWS_PGA(ring->mmio_base);
788 	}
789 
790 	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
791 	POSTING_READ(mmio);
792 }
793 
794 static int
795 bsd_ring_flush(struct intel_ring_buffer *ring,
796 	       u32     invalidate_domains,
797 	       u32     flush_domains)
798 {
799 	int ret;
800 
801 	ret = intel_ring_begin(ring, 2);
802 	if (ret)
803 		return ret;
804 
805 	intel_ring_emit(ring, MI_FLUSH);
806 	intel_ring_emit(ring, MI_NOOP);
807 	intel_ring_advance(ring);
808 	return 0;
809 }
810 
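/*
 * Legacy add_request: store the new seqno into the hardware status page
 * with MI_STORE_DWORD_INDEX and then raise MI_USER_INTERRUPT.
 */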
811 static int
812 i9xx_add_request(struct intel_ring_buffer *ring,
813 		 u32 *result)
814 {
815 	u32 seqno;
816 	int ret;
817 
818 	ret = intel_ring_begin(ring, 4);
819 	if (ret)
820 		return ret;
821 
822 	seqno = i915_gem_next_request_seqno(ring);
823 
824 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
825 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
826 	intel_ring_emit(ring, seqno);
827 	intel_ring_emit(ring, MI_USER_INTERRUPT);
828 	intel_ring_advance(ring);
829 
830 	*result = seqno;
831 	return 0;
832 }
833 
834 static bool
835 gen6_ring_get_irq(struct intel_ring_buffer *ring)
836 {
837 	struct drm_device *dev = ring->dev;
838 	drm_i915_private_t *dev_priv = dev->dev_private;
839 
840 	if (!dev->irq_enabled)
841 	       return false;
842 
843 	gen6_gt_force_wake_get(dev_priv);
844 
845 	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
846 	if (ring->irq_refcount++ == 0) {
847 		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
848 		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
849 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
850 		POSTING_READ(GTIMR);
851 	}
852 
853 	return true;
854 }
855 
856 static void
857 gen6_ring_put_irq(struct intel_ring_buffer *ring)
858 {
859 	struct drm_device *dev = ring->dev;
860 	drm_i915_private_t *dev_priv = dev->dev_private;
861 
862 	mtx_assert(&dev_priv->irq_lock, MA_OWNED);
863 	if (--ring->irq_refcount == 0) {
864 		I915_WRITE_IMR(ring, ~0);
865 		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
866 		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
867 		POSTING_READ(GTIMR);
868 	}
869 
870 	gen6_gt_force_wake_put(dev_priv);
871 }
872 
873 static int
874 i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
875 			 u32 offset, u32 length)
876 {
877 	int ret;
878 
879 	ret = intel_ring_begin(ring, 2);
880 	if (ret)
881 		return ret;
882 
883 	intel_ring_emit(ring,
884 			MI_BATCH_BUFFER_START |
885 			MI_BATCH_GTT |
886 			MI_BATCH_NON_SECURE_I965);
887 	intel_ring_emit(ring, offset);
888 	intel_ring_advance(ring);
889 
890 	return 0;
891 }
892 
893 static int
894 i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
895 				u32 offset, u32 len)
896 {
897 	int ret;
898 
899 	ret = intel_ring_begin(ring, 4);
900 	if (ret)
901 		return ret;
902 
903 	intel_ring_emit(ring, MI_BATCH_BUFFER);
904 	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
905 	intel_ring_emit(ring, offset + len - 8);
906 	intel_ring_emit(ring, 0);
907 	intel_ring_advance(ring);
908 
909 	return 0;
910 }
911 
912 static int
913 i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
914 			 u32 offset, u32 len)
915 {
916 	int ret;
917 
918 	ret = intel_ring_begin(ring, 2);
919 	if (ret)
920 		return ret;
921 
922 	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
923 	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
924 	intel_ring_advance(ring);
925 
926 	return 0;
927 }
928 
929 static void cleanup_status_page(struct intel_ring_buffer *ring)
930 {
931 	struct drm_i915_gem_object *obj;
932 
933 	obj = ring->status_page.obj;
934 	if (obj == NULL)
935 		return;
936 
937 	pmap_qremove((vm_offset_t)ring->status_page.page_addr, 1);
938 	kva_free((vm_offset_t)ring->status_page.page_addr,
939 	    PAGE_SIZE);
940 	i915_gem_object_unpin(obj);
941 	drm_gem_object_unreference(&obj->base);
942 	ring->status_page.obj = NULL;
943 }
944 
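/*
 * Allocate a 4KB GEM object for the hardware status page, pin it into the
 * GTT and map it into kernel VA so the CPU can read seqnos written by the
 * GPU.
 */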
945 static int init_status_page(struct intel_ring_buffer *ring)
946 {
947 	struct drm_device *dev = ring->dev;
948 	struct drm_i915_gem_object *obj;
949 	int ret;
950 
951 	obj = i915_gem_alloc_object(dev, 4096);
952 	if (obj == NULL) {
953 		DRM_ERROR("Failed to allocate status page\n");
954 		ret = -ENOMEM;
955 		goto err;
956 	}
957 
958 	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
959 
960 	ret = i915_gem_object_pin(obj, 4096, true);
961 	if (ret != 0) {
962 		goto err_unref;
963 	}
964 
965 	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = (void *)kva_alloc(PAGE_SIZE);
	if (ring->status_page.page_addr == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}
970 	pmap_qenter((vm_offset_t)ring->status_page.page_addr, &obj->pages[0],
971 	    1);
972 	pmap_invalidate_cache_range((vm_offset_t)ring->status_page.page_addr,
973 	    (vm_offset_t)ring->status_page.page_addr + PAGE_SIZE, FALSE);
974 	ring->status_page.obj = obj;
975 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
976 
977 	intel_ring_setup_status_page(ring);
978 	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
979 			ring->name, ring->status_page.gfx_addr);
980 
981 	return 0;
982 
983 err_unpin:
984 	i915_gem_object_unpin(obj);
985 err_unref:
986 	drm_gem_object_unreference(&obj->base);
987 err:
988 	return ret;
989 }
990 
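/*
 * Common ring construction: set up the status page when the hardware needs
 * one, allocate and pin the ring object, map it write-combining through the
 * GTT aperture and finally run the ring-specific init hook.
 */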
991 static int intel_init_ring_buffer(struct drm_device *dev,
992 			   struct intel_ring_buffer *ring)
993 {
994 	struct drm_i915_gem_object *obj;
995 	int ret;
996 
997 	ring->dev = dev;
998 	INIT_LIST_HEAD(&ring->active_list);
999 	INIT_LIST_HEAD(&ring->request_list);
1000 	INIT_LIST_HEAD(&ring->gpu_write_list);
1001 	ring->size = 32 * PAGE_SIZE;
1002 
1003 	if (I915_NEED_GFX_HWS(dev)) {
1004 		ret = init_status_page(ring);
1005 		if (ret)
1006 			return ret;
1007 	}
1008 
1009 	obj = i915_gem_alloc_object(dev, ring->size);
1010 	if (obj == NULL) {
1011 		DRM_ERROR("Failed to allocate ringbuffer\n");
1012 		ret = -ENOMEM;
1013 		goto err_hws;
1014 	}
1015 
1016 	ring->obj = obj;
1017 
1018 	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
1019 	if (ret)
1020 		goto err_unref;
1021 
1022 	ring->virtual_start = pmap_mapdev_attr(
1023 	    dev->agp->base + obj->gtt_offset, ring->size,
1024 	    VM_MEMATTR_WRITE_COMBINING);
1025 	if (ring->virtual_start == NULL) {
1026 		DRM_ERROR("Failed to map ringbuffer.\n");
1027 		ret = -EINVAL;
1028 		goto err_unpin;
1029 	}
1030 
1031 	ret = ring->init(ring);
1032 	if (ret)
1033 		goto err_unmap;
1034 
1035 	/* Workaround an erratum on the i830 which causes a hang if
1036 	 * the TAIL pointer points to within the last 2 cachelines
1037 	 * of the buffer.
1038 	 */
1039 	ring->effective_size = ring->size;
1040 	if (IS_I830(ring->dev) || IS_845G(ring->dev))
1041 		ring->effective_size -= 128;
1042 
1043 	return 0;
1044 
1045 err_unmap:
1046 	pmap_unmapdev((vm_offset_t)ring->virtual_start, ring->size);
1047 err_unpin:
1048 	i915_gem_object_unpin(obj);
1049 err_unref:
1050 	drm_gem_object_unreference(&obj->base);
1051 	ring->obj = NULL;
1052 err_hws:
1053 	cleanup_status_page(ring);
1054 	return ret;
1055 }
1056 
1057 void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1058 {
1059 	struct drm_i915_private *dev_priv;
1060 	int ret;
1061 
1062 	if (ring->obj == NULL)
1063 		return;
1064 
1065 	/* Disable the ring buffer. The ring must be idle at this point */
1066 	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_idle(ring);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);
1068 	I915_WRITE_CTL(ring, 0);
1069 
1070 	pmap_unmapdev((vm_offset_t)ring->virtual_start, ring->size);
1071 
1072 	i915_gem_object_unpin(ring->obj);
1073 	drm_gem_object_unreference(&ring->obj->base);
1074 	ring->obj = NULL;
1075 
1076 	if (ring->cleanup)
1077 		ring->cleanup(ring);
1078 
1079 	cleanup_status_page(ring);
1080 }
1081 
1082 static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
1083 {
1084 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
1085 	bool was_interruptible;
1086 	int ret;
1087 
1088 	/* XXX As we have not yet audited all the paths to check that
1089 	 * they are ready for ERESTARTSYS from intel_ring_begin, do not
1090 	 * allow us to be interruptible by a signal.
1091 	 */
1092 	was_interruptible = dev_priv->mm.interruptible;
1093 	dev_priv->mm.interruptible = false;
1094 
1095 	ret = i915_wait_request(ring, seqno);
1096 
1097 	dev_priv->mm.interruptible = was_interruptible;
1098 	if (!ret)
1099 		i915_gem_retire_requests_ring(ring);
1100 
1101 	return ret;
1102 }
1103 
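/*
 * Try to make at least @n bytes of ring space available by retiring
 * completed requests, waiting on the oldest outstanding request whose
 * retirement would free enough room.
 */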
1104 static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1105 {
1106 	struct drm_i915_gem_request *request;
1107 	u32 seqno = 0;
1108 	int ret;
1109 
1110 	i915_gem_retire_requests_ring(ring);
1111 
1112 	if (ring->last_retired_head != -1) {
1113 		ring->head = ring->last_retired_head;
1114 		ring->last_retired_head = -1;
1115 		ring->space = ring_space(ring);
1116 		if (ring->space >= n)
1117 			return 0;
1118 	}
1119 
1120 	list_for_each_entry(request, &ring->request_list, list) {
1121 		int space;
1122 
1123 		if (request->tail == -1)
1124 			continue;
1125 
1126 		space = request->tail - (ring->tail + 8);
1127 		if (space < 0)
1128 			space += ring->size;
1129 		if (space >= n) {
1130 			seqno = request->seqno;
1131 			break;
1132 		}
1133 
1134 		/* Consume this request in case we need more space than
1135 		 * is available and so need to prevent a race between
1136 		 * updating last_retired_head and direct reads of
1137 		 * I915_RING_HEAD. It also provides a nice sanity check.
1138 		 */
1139 		request->tail = -1;
1140 	}
1141 
1142 	if (seqno == 0)
1143 		return -ENOSPC;
1144 
1145 	ret = intel_ring_wait_seqno(ring, seqno);
1146 	if (ret)
1147 		return ret;
1148 
1149 	if (ring->last_retired_head == -1)
1150 		return -ENOSPC;
1151 
1152 	ring->head = ring->last_retired_head;
1153 	ring->last_retired_head = -1;
1154 	ring->space = ring_space(ring);
1155 	if (ring->space < n)
1156 		return -ENOSPC;
1157 
1158 	return 0;
1159 }
1160 
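/*
 * Wait for @n bytes of ring space: first try to retire requests, then fall
 * back to polling the hardware HEAD (sleeping one tick per iteration) for
 * up to 60 seconds, bailing out early if the GPU is wedged.
 */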
1161 int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
1162 {
1163 	struct drm_device *dev = ring->dev;
1164 	struct drm_i915_private *dev_priv = dev->dev_private;
1165 	int end;
1166 	int ret;
1167 
1168 	ret = intel_ring_wait_request(ring, n);
1169 	if (ret != -ENOSPC)
1170 		return ret;
1171 
1172 	CTR1(KTR_DRM, "ring_wait_begin %s", ring->name);
1173 	/* With GEM the hangcheck timer should kick us out of the loop,
1174 	 * leaving it early runs the risk of corrupting GEM state (due
1175 	 * to running on almost untested codepaths). But on resume
1176 	 * timers don't work yet, so prevent a complete hang in that
1177 	 * case by choosing an insanely large timeout. */
1178 	end = ticks + hz * 60;
1179 
1180 	do {
1181 		ring->head = I915_READ_HEAD(ring);
1182 		ring->space = ring_space(ring);
1183 		if (ring->space >= n) {
1184 			CTR1(KTR_DRM, "ring_wait_end %s", ring->name);
1185 			return 0;
1186 		}
1187 
1188 		if (dev->primary->master) {
1189 			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1190 			if (master_priv->sarea_priv)
1191 				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1192 		}
1193 
1194 		pause("915rng", 1);
1195 		if (atomic_load_acq_32(&dev_priv->mm.wedged) != 0) {
1196 			CTR1(KTR_DRM, "ring_wait_end %s wedged", ring->name);
1197 			return -EAGAIN;
1198 		}
1199 	} while (!time_after(ticks, end));
1200 	CTR1(KTR_DRM, "ring_wait_end %s busy", ring->name);
1201 	return -EBUSY;
1202 }
1203 
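/*
 * Pad the remainder of the ring with MI_NOOPs and wrap TAIL back to the
 * start, first waiting for that tail space to drain if it is still busy.
 */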
1204 static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
1205 {
1206 	uint32_t *virt;
1207 	int rem = ring->size - ring->tail;
1208 
1209 	if (ring->space < rem) {
1210 		int ret = intel_wait_ring_buffer(ring, rem);
1211 		if (ret)
1212 			return ret;
1213 	}
1214 
1215 	virt = (uint32_t *)((char *)ring->virtual_start + ring->tail);
1216 	rem /= 4;
1217 	while (rem--)
1218 		*virt++ = MI_NOOP;
1219 
1220 	ring->tail = 0;
1221 	ring->space = ring_space(ring);
1222 
1223 	return 0;
1224 }
1225 
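/*
 * Reserve space for @num_dwords dwords of commands.  Typical usage is the
 * begin/emit/advance pattern used throughout this file (illustrative
 * sketch only):
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_FLUSH);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */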
1226 int intel_ring_begin(struct intel_ring_buffer *ring,
1227 		     int num_dwords)
1228 {
1229 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
1230 	int n = 4*num_dwords;
1231 	int ret;
1232 
1233 	if (atomic_load_acq_int(&dev_priv->mm.wedged))
1234 		return -EIO;
1235 
1236 	if (ring->tail + n > ring->effective_size) {
1237 		ret = intel_wrap_ring_buffer(ring);
1238 		if (ret != 0)
1239 			return ret;
1240 	}
1241 
1242 	if (ring->space < n) {
1243 		ret = intel_wait_ring_buffer(ring, n);
1244 		if (ret != 0)
1245 			return ret;
1246 	}
1247 
1248 	ring->space -= n;
1249 	return 0;
1250 }
1251 
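/*
 * Commit the dwords emitted since intel_ring_begin(): mask TAIL into the
 * ring and write it to the hardware, unless this ring has been stopped via
 * dev_priv->stop_rings (a debugging aid).
 */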
1252 void intel_ring_advance(struct intel_ring_buffer *ring)
1253 {
1254 	struct drm_i915_private *dev_priv = ring->dev->dev_private;
1255 
1256 	ring->tail &= ring->size - 1;
1257 	if (dev_priv->stop_rings & intel_ring_flag(ring))
1258 		return;
1259 	ring->write_tail(ring, ring->tail);
1260 }
1261 
1262 
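/*
 * On gen6 the BSD ring apparently may drop TAIL writes while it is in its
 * power-save state, so every tail move is bracketed by the PSMI
 * sleep-control sequence below.
 */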
1263 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1264 				     u32 value)
1265 {
1266 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
1267 
1268 	/* Every tail move must follow the sequence below */
1269 	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1270 	    GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1271 	    GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
1272 	I915_WRITE(GEN6_BSD_RNCID, 0x0);
1273 
1274 	if (_intel_wait_for(ring->dev,
1275 	    (I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
1276 	     GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0, 50,
1277 	    true, "915g6i") != 0)
1278 		DRM_ERROR("timed out waiting for IDLE Indicator\n");
1279 
1280 	I915_WRITE_TAIL(ring, value);
1281 	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
1282 	    GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
1283 	    GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
1284 }
1285 
1286 static int gen6_ring_flush(struct intel_ring_buffer *ring,
1287 			   u32 invalidate, u32 flush)
1288 {
1289 	uint32_t cmd;
1290 	int ret;
1291 
1292 	ret = intel_ring_begin(ring, 4);
1293 	if (ret)
1294 		return ret;
1295 
1296 	cmd = MI_FLUSH_DW;
1297 	if (invalidate & I915_GEM_GPU_DOMAINS)
1298 		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1299 	intel_ring_emit(ring, cmd);
1300 	intel_ring_emit(ring, 0);
1301 	intel_ring_emit(ring, 0);
1302 	intel_ring_emit(ring, MI_NOOP);
1303 	intel_ring_advance(ring);
1304 	return 0;
1305 }
1306 
1307 static int
1308 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1309 			      u32 offset, u32 len)
1310 {
1311 	int ret;
1312 
1313 	ret = intel_ring_begin(ring, 2);
1314 	if (ret)
1315 		return ret;
1316 
1317 	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
1318 	/* bit0-7 is the length on GEN6+ */
1319 	intel_ring_emit(ring, offset);
1320 	intel_ring_advance(ring);
1321 
1322 	return 0;
1323 }
1324 
1325 /* Blitter support (SandyBridge+) */
1326 
1327 static int blt_ring_flush(struct intel_ring_buffer *ring,
1328 			  u32 invalidate, u32 flush)
1329 {
1330 	uint32_t cmd;
1331 	int ret;
1332 
1333 	ret = intel_ring_begin(ring, 4);
1334 	if (ret)
1335 		return ret;
1336 
1337 	cmd = MI_FLUSH_DW;
1338 	if (invalidate & I915_GEM_DOMAIN_RENDER)
1339 		cmd |= MI_INVALIDATE_TLB;
1340 	intel_ring_emit(ring, cmd);
1341 	intel_ring_emit(ring, 0);
1342 	intel_ring_emit(ring, 0);
1343 	intel_ring_emit(ring, MI_NOOP);
1344 	intel_ring_advance(ring);
1345 	return 0;
1346 }
1347 
1348 int intel_init_render_ring_buffer(struct drm_device *dev)
1349 {
1350 	drm_i915_private_t *dev_priv = dev->dev_private;
1351 	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
1352 
1353 	ring->name = "render ring";
1354 	ring->id = RCS;
1355 	ring->mmio_base = RENDER_RING_BASE;
1356 
1357 	if (INTEL_INFO(dev)->gen >= 6) {
1358 		ring->add_request = gen6_add_request;
1359 		ring->flush = gen6_render_ring_flush;
1360 		ring->irq_get = gen6_ring_get_irq;
1361 		ring->irq_put = gen6_ring_put_irq;
1362 		ring->irq_enable_mask = GT_USER_INTERRUPT;
1363 		ring->get_seqno = gen6_ring_get_seqno;
1364 		ring->sync_to = gen6_ring_sync;
1365 		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
1366 		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
1367 		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
1368 		ring->signal_mbox[0] = GEN6_VRSYNC;
1369 		ring->signal_mbox[1] = GEN6_BRSYNC;
1370 	} else if (IS_GEN5(dev)) {
1371 		ring->add_request = pc_render_add_request;
1372 		ring->flush = gen4_render_ring_flush;
1373 		ring->get_seqno = pc_render_get_seqno;
1374 		ring->irq_get = gen5_ring_get_irq;
1375 		ring->irq_put = gen5_ring_put_irq;
1376 		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
1377 	} else {
1378 		ring->add_request = i9xx_add_request;
1379 		if (INTEL_INFO(dev)->gen < 4)
1380 			ring->flush = gen2_render_ring_flush;
1381 		else
1382 			ring->flush = gen4_render_ring_flush;
1383 		ring->get_seqno = ring_get_seqno;
1384 		if (IS_GEN2(dev)) {
1385 			ring->irq_get = i8xx_ring_get_irq;
1386 			ring->irq_put = i8xx_ring_put_irq;
1387 		} else {
1388 			ring->irq_get = i9xx_ring_get_irq;
1389 			ring->irq_put = i9xx_ring_put_irq;
1390 		}
1391 		ring->irq_enable_mask = I915_USER_INTERRUPT;
1392 	}
1393 	ring->write_tail = ring_write_tail;
1394 	if (INTEL_INFO(dev)->gen >= 6)
1395 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1396 	else if (INTEL_INFO(dev)->gen >= 4)
1397 		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1398 	else if (IS_I830(dev) || IS_845G(dev))
1399 		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
1400 	else
1401 		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
1402 	ring->init = init_render_ring;
1403 	ring->cleanup = render_ring_cleanup;
1404 
1405 
1406 	if (!I915_NEED_GFX_HWS(dev)) {
1407 		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1408 		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1409 	}
1410 
1411 	return intel_init_ring_buffer(dev, ring);
1412 }
1413 
1414 int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1415 {
1416 	drm_i915_private_t *dev_priv = dev->dev_private;
1417 	struct intel_ring_buffer *ring = &dev_priv->rings[RCS];
1418 
1419 	ring->name = "render ring";
1420 	ring->id = RCS;
1421 	ring->mmio_base = RENDER_RING_BASE;
1422 
1423 	if (INTEL_INFO(dev)->gen >= 6) {
1424 		/* non-kms not supported on gen6+ */
1425 		return -ENODEV;
1426 	}
1427 
1428 	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
1429 	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
1430 	 * the special gen5 functions. */
1431 	ring->add_request = i9xx_add_request;
1432 	if (INTEL_INFO(dev)->gen < 4)
1433 		ring->flush = gen2_render_ring_flush;
1434 	else
1435 		ring->flush = gen4_render_ring_flush;
1436 	ring->get_seqno = ring_get_seqno;
1437 	if (IS_GEN2(dev)) {
1438 		ring->irq_get = i8xx_ring_get_irq;
1439 		ring->irq_put = i8xx_ring_put_irq;
1440 	} else {
1441 		ring->irq_get = i9xx_ring_get_irq;
1442 		ring->irq_put = i9xx_ring_put_irq;
1443 	}
1444 	ring->irq_enable_mask = I915_USER_INTERRUPT;
1445 	ring->write_tail = ring_write_tail;
1446 	if (INTEL_INFO(dev)->gen >= 4)
1447 		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1448 	else if (IS_I830(dev) || IS_845G(dev))
1449 		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
1450 	else
1451 		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
1452 	ring->init = init_render_ring;
1453 	ring->cleanup = render_ring_cleanup;
1454 
1455 	ring->dev = dev;
1456 	INIT_LIST_HEAD(&ring->active_list);
1457 	INIT_LIST_HEAD(&ring->request_list);
1458 	INIT_LIST_HEAD(&ring->gpu_write_list);
1459 
1460 	ring->size = size;
1461 	ring->effective_size = ring->size;
1462 	if (IS_I830(ring->dev))
1463 		ring->effective_size -= 128;
1464 
1465 	ring->virtual_start = pmap_mapdev_attr(start, size,
1466 	    VM_MEMATTR_WRITE_COMBINING);
1467 	if (ring->virtual_start == NULL) {
1468 		DRM_ERROR("can not ioremap virtual address for"
1469 			  " ring buffer\n");
1470 		return -ENOMEM;
1471 	}
1472 
1473 	return 0;
1474 }
1475 
1476 int intel_init_bsd_ring_buffer(struct drm_device *dev)
1477 {
1478 	drm_i915_private_t *dev_priv = dev->dev_private;
1479 	struct intel_ring_buffer *ring = &dev_priv->rings[VCS];
1480 
1481 	ring->name = "bsd ring";
1482 	ring->id = VCS;
1483 
1484 	ring->write_tail = ring_write_tail;
1485 	if (IS_GEN6(dev) || IS_GEN7(dev)) {
1486 		ring->mmio_base = GEN6_BSD_RING_BASE;
1487 		/* gen6 bsd needs a special wa for tail updates */
1488 		if (IS_GEN6(dev))
1489 			ring->write_tail = gen6_bsd_ring_write_tail;
1490 		ring->flush = gen6_ring_flush;
1491 		ring->add_request = gen6_add_request;
1492 		ring->get_seqno = gen6_ring_get_seqno;
1493 		ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
1494 		ring->irq_get = gen6_ring_get_irq;
1495 		ring->irq_put = gen6_ring_put_irq;
1496 		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1497 		ring->sync_to = gen6_ring_sync;
1498 		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
1499 		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
1500 		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
1501 		ring->signal_mbox[0] = GEN6_RVSYNC;
1502 		ring->signal_mbox[1] = GEN6_BVSYNC;
1503 	} else {
1504 		ring->mmio_base = BSD_RING_BASE;
1505 		ring->flush = bsd_ring_flush;
1506 		ring->add_request = i9xx_add_request;
1507 		ring->get_seqno = ring_get_seqno;
1508 		if (IS_GEN5(dev)) {
1509 			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
1510 			ring->irq_get = gen5_ring_get_irq;
1511 			ring->irq_put = gen5_ring_put_irq;
1512 		} else {
1513 			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
1514 			ring->irq_get = i9xx_ring_get_irq;
1515 			ring->irq_put = i9xx_ring_put_irq;
1516 		}
1517 		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1518 	}
1519 	ring->init = init_ring_common;
1520 
1521 
1522 	return intel_init_ring_buffer(dev, ring);
1523 }
1524 
1525 int intel_init_blt_ring_buffer(struct drm_device *dev)
1526 {
1527 	drm_i915_private_t *dev_priv = dev->dev_private;
1528 	struct intel_ring_buffer *ring = &dev_priv->rings[BCS];
1529 
1530 	ring->name = "blitter ring";
1531 	ring->id = BCS;
1532 
1533 	ring->mmio_base = BLT_RING_BASE;
1534 	ring->write_tail = ring_write_tail;
1535 	ring->flush = blt_ring_flush;
1536 	ring->add_request = gen6_add_request;
1537 	ring->get_seqno = gen6_ring_get_seqno;
1538 	ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
1539 	ring->irq_get = gen6_ring_get_irq;
1540 	ring->irq_put = gen6_ring_put_irq;
1541 	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1542 	ring->sync_to = gen6_ring_sync;
1543 	ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
1544 	ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
1545 	ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
1546 	ring->signal_mbox[0] = GEN6_RBSYNC;
1547 	ring->signal_mbox[1] = GEN6_VBSYNC;
1548 	ring->init = init_ring_common;
1549 
1550 	return intel_init_ring_buffer(dev, ring);
1551 }
1552