/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

#define RADEON_CS_MAX_PRIORITY	32u
#define RADEON_CS_NUM_BUCKETS	(RADEON_CS_MAX_PRIORITY + 1)

/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct radeon_cs_buckets {
	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};

static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
	unsigned i;

	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&b->bucket[i]);
}

static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
				  struct list_head *item, unsigned priority)
{
	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}

static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}

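/* Walk the relocation chunk, look up each GEM handle, choose placement
 * domains and a priority for every buffer, and build the sorted list of
 * BOs that must be validated for this command stream.
 */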
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	struct radeon_cs_buckets buckets;
	unsigned i, j;
	bool duplicate, need_mmap_lock = false;
	int r;

	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	chunk = &p->chunks[p->chunk_relocs_idx];
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
	if (p->relocs_ptr == NULL) {
		return -ENOMEM;
	}
	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}

	radeon_cs_buckets_init(&buckets);

	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;
		unsigned priority;

		duplicate = false;
		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		for (j = 0; j < i; j++) {
			if (r->handle == p->relocs[j].handle) {
				p->relocs_ptr[i] = &p->relocs[j];
				duplicate = true;
				break;
			}
		}
		if (duplicate) {
			p->relocs[i].handle = 0;
			continue;
		}

		p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
							  r->handle);
		if (p->relocs[i].gobj == NULL) {
			DRM_ERROR("gem object lookup failed 0x%x\n",
				  r->handle);
			return -ENOENT;
		}
		p->relocs_ptr[i] = &p->relocs[i];
		p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);

		/* The userspace buffer priorities are from 0 to 15. A higher
		 * number means the buffer is more important.
		 * Also, the buffers used for write have a higher priority than
		 * the buffers used for read only, which doubles the range
		 * to 0 to 31. 32 is reserved for the kernel driver.
		 */
		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
			   + !!r->write_domain;

		/* The first reloc of an UVD job is the msg and that must be in
		 * VRAM; also put everything into VRAM on AGP cards and older
		 * IGP chips to avoid image corruption.
		 */
		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
		    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev) ||
		     p->rdev->family == CHIP_RS780 ||
		     p->rdev->family == CHIP_RS880)) {

			/* TODO: is this still needed for NI+ ? */
			p->relocs[i].prefered_domains =
				RADEON_GEM_DOMAIN_VRAM;

			p->relocs[i].allowed_domains =
				RADEON_GEM_DOMAIN_VRAM;

			/* prioritize this over any other relocation */
			priority = RADEON_CS_MAX_PRIORITY;
		} else {
			uint32_t domain = r->write_domain ?
				r->write_domain : r->read_domains;

			if (domain & RADEON_GEM_DOMAIN_CPU) {
				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
					  "for command submission\n");
				return -EINVAL;
			}

			p->relocs[i].prefered_domains = domain;
			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain |= RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].allowed_domains = domain;
		}

		if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
			uint32_t domain = p->relocs[i].prefered_domains;

			if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
				DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
					  "allowed for userptr BOs\n");
				return -EINVAL;
			}
			need_mmap_lock = true;
			domain = RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].prefered_domains = domain;
			p->relocs[i].allowed_domains = domain;
		}

		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
		p->relocs[i].tv.shared = false;
		p->relocs[i].handle = r->handle;

		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
				      priority);
	}

	radeon_cs_buckets_get_list(&buckets, &p->validated);

	if (p->cs_flags & RADEON_CS_USE_VM)
		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
					      &p->validated);
	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);

	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);

	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);

	return r;
}

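/* Translate the ring id and priority supplied by userspace into one of
 * the kernel's hardware ring indices, depending on the chip family.
 */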
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			if (p->priority > 0)
				p->ring = R600_RING_TYPE_DMA_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_RV770) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
		} else {
			return -EINVAL;
		}
		break;
	case RADEON_CS_RING_UVD:
		p->ring = R600_RING_TYPE_UVD_INDEX;
		break;
	case RADEON_CS_RING_VCE:
		/* TODO: only use the low priority ring for now */
		p->ring = TN_RING_TYPE_VCE1_INDEX;
		break;
	}
	return 0;
}

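/* Make this submission wait for the exclusive fences of all validated
 * BOs, so it cannot start before earlier work on those buffers finishes.
 */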
static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	int i;

	for (i = 0; i < p->nrelocs; i++) {
		struct reservation_object *resv;
		struct fence *fence;

		if (!p->relocs[i].robj)
			continue;

		resv = p->relocs[i].robj->tbo.resv;
		fence = reservation_object_get_excl(resv);

		radeon_semaphore_sync_to(p->ib.semaphore,
					 (struct radeon_fence *)fence);
	}
}

/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	if (!cs->num_chunks) {
		return 0;
	}
	INIT_LIST_HEAD(&p->validated);
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->ib.semaphore = NULL;
	p->const_ib.sa_bo = NULL;
	p->const_ib.semaphore = NULL;
	p->chunk_ib_idx = -1;
	p->chunk_relocs_idx = -1;
	p->chunk_flags_idx = -1;
	p->chunk_const_ib_idx = -1;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (copy_from_user(p->chunks_array, chunk_array_ptr,
			   sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user *)(unsigned long)p->chunks_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib_idx = i;
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib_idx = i;
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags_idx = i;
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
			continue;

		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
				continue;
		}

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		size *= sizeof(uint32_t);
		if (p->chunks[i].kdata == NULL) {
			return -ENOMEM;
		}
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			return -EFAULT;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->cs_flags = p->chunks[i].kdata[0];
			if (p->chunks[i].length_dw > 1)
				ring = p->chunks[i].kdata[1];
			if (p->chunks[i].length_dw > 2)
				priority = (s32)p->chunks[i].kdata[2];
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;

		/* we only support VM on some SI+ rings */
		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
				DRM_ERROR("Ring %d requires VM!\n", p->ring);
				return -EINVAL;
			}
		} else {
			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
				DRM_ERROR("VM not supported on ring %d!\n",
					  p->ring);
				return -EINVAL;
			}
		}
	}

	return 0;
}

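/* list_sort() comparison callback: order BOs by size, smallest first. */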
static int cmp_size_smaller_first(void *priv, struct list_head *a,
				  struct list_head *b)
{
	struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
	struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);

	/* Sort A before B if A is smaller. */
	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}

/**
 * radeon_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 * @backoff:	indicator to back off the reservation
 *
 * If error is set, unvalidate the buffers; otherwise just free the memory
 * used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
	unsigned i;

	if (!error) {
		/* Sort the buffer list from the smallest to largest buffer,
		 * which affects the order of buffers in the LRU list.
		 * This assures that the smallest buffers are added first
		 * to the LRU list, so they are likely to be later evicted
		 * first, instead of large buffers whose eviction is more
		 * expensive.
		 *
		 * This slightly lowers the number of bytes moved by TTM
		 * per frame under memory pressure.
		 */
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    &parser->ib.fence->base);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}

	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			if (parser->relocs[i].gobj)
				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
		}
	}
	kfree(parser->track);
	kfree(parser->relocs);
	kfree(parser->relocs_ptr);
	kfree(parser->vm_bos);
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}

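/* Submission path for non-VM command streams: run the per-ASIC packet
 * checker over the IB before scheduling it on the ring.
 */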
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);
	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
		radeon_vce_note_usage(rdev);

	radeon_cs_sync_rings(parser);
	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	return r;
}

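/* Update the VM page tables for every BO referenced by the submission,
 * including the temporary ring BO mapping.
 */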
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = p->rdev;
	struct radeon_bo_va *bo_va;
	int i, r;

	r = radeon_vm_update_page_directory(rdev, vm);
	if (r)
		return r;

	r = radeon_vm_clear_freed(rdev, vm);
	if (r)
		return r;

	if (vm->ib_bo_va == NULL) {
		DRM_ERROR("Tmp BO not in VM!\n");
		return -EINVAL;
	}

	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
				&rdev->ring_tmp_bo.bo->tbo.mem);
	if (r)
		return r;

	for (i = 0; i < p->nrelocs; i++) {
		struct radeon_bo *bo;

		/* ignore duplicates */
		if (p->relocs_ptr[i] != &p->relocs[i])
			continue;

		bo = p->relocs[i].robj;
		bo_va = radeon_vm_bo_find(vm, bo);
		if (bo_va == NULL) {
			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
			return -EINVAL;
		}

		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
		if (r)
			return r;
	}

	return radeon_vm_clear_invalids(rdev, vm);
}

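/* Submission path for VM command streams: parse the IB(s), update the
 * page tables and schedule the IB(s) while holding the VM mutex.
 */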
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if (parser->const_ib.length_dw) {
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r) {
			return r;
		}
	}

	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);

	mutex_lock(&vm->mutex);
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}
	radeon_cs_sync_rings(parser);
	radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	}

out:
	mutex_unlock(&vm->mutex);
	return r;
}

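/* -EDEADLK means a GPU lockup was detected while submitting; reset the
 * GPU and ask userspace to retry the submission with -EAGAIN.
 */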
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}

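/* Allocate the IB (and, on SI+, the optional CONST IB) and copy the
 * command stream from the CS chunks into it.
 */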
static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_vm *vm = NULL;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM) {
		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
		vm = &fpriv->vm;

		if ((rdev->family >= CHIP_TAHITI) &&
		    (parser->chunk_const_ib_idx != -1)) {
			ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
				return -EINVAL;
			}
			r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
					  vm, ib_chunk->length_dw * 4);
			if (r) {
				DRM_ERROR("Failed to get const ib !\n");
				return r;
			}
			parser->const_ib.is_const_ib = true;
			parser->const_ib.length_dw = ib_chunk->length_dw;
			if (copy_from_user(parser->const_ib.ptr,
					   ib_chunk->user_ptr,
					   ib_chunk->length_dw * 4))
				return -EFAULT;
		}

		ib_chunk = &parser->chunks[parser->chunk_ib_idx];
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
	}
	ib_chunk = &parser->chunks[parser->chunk_ib_idx];

	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	if (ib_chunk->kdata)
		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
		return -EFAULT;
	return 0;
}

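/* DRM_IOCTL_RADEON_CS entry point: initialize the parser, validate the
 * BO list and dispatch to the VM or non-VM submission path.
 */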
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	down_read(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
		up_read(&rdev->exclusive_lock);
		return -EBUSY;
	}
	if (rdev->in_reset) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
		return r;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	r = radeon_cs_ib_fill(rdev, &parser);
	if (!r) {
		r = radeon_cs_parser_relocs(&parser);
		if (r && r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
	}

	if (r) {
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	trace_radeon_cs(&parser);

	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r)
		goto out;

	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r)
		goto out;

out:
	radeon_cs_parser_fini(&parser, r, true);
	up_read(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}

/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	index of the packet header in the ib
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet
 * is unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt,
			   unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	struct radeon_device *rdev = p->rdev;
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case RADEON_PACKET_TYPE0:
		if (rdev->family < CHIP_R600) {
			pkt->reg = R100_CP_PACKET0_GET_REG(header);
			pkt->one_reg_wr =
				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
		} else
			pkt->reg = R600_CP_PACKET0_GET_REG(header);
		break;
	case RADEON_PACKET_TYPE3:
		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
		break;
	case RADEON_PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}

/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p:		structure holding the parser context.
 *
 * Check if the next packet is a NOP relocation packet3.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return false;
	if (p3reloc.type != RADEON_PACKET_TYPE3)
		return false;
	if (p3reloc.opcode != RADEON_PACKET3_NOP)
		return false;
	return true;
}

/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p:		structure holding the parser context.
 * @pkt:	structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib.ptr;
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++)
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}

/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where to store the resulting reloc
 * @nomm:	no memory management for debugging
 *
 * Check if the next packet is a relocation packet3; if so, return the
 * matching reloc through @cs_reloc. In the nomm path the GPU offset is
 * taken directly from the relocation chunk.
 **/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
				struct radeon_cs_reloc **cs_reloc,
				int nomm)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
	    p3reloc.opcode != RADEON_PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	if (nomm) {
		*cs_reloc = p->relocs;
		(*cs_reloc)->gpu_offset =
			(u64)relocs_chunk->kdata[idx + 3] << 32;
		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
	} else
		*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}