/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/list_sort.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"
#define RADEON_CS_MAX_PRIORITY		32u
#define RADEON_CS_NUM_BUCKETS		(RADEON_CS_MAX_PRIORITY + 1)
/* This is based on the bucket sort with O(n) time complexity.
 * An item with priority "i" is added to bucket[i]. The lists are then
 * concatenated in descending order.
 */
struct radeon_cs_buckets {
	struct list_head bucket[RADEON_CS_NUM_BUCKETS];
};
static void radeon_cs_buckets_init(struct radeon_cs_buckets *b)
{
	unsigned i;

	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++)
		INIT_LIST_HEAD(&b->bucket[i]);
}
static void radeon_cs_buckets_add(struct radeon_cs_buckets *b,
				  struct list_head *item, unsigned priority)
{
	/* Since buffers which appear sooner in the relocation list are
	 * likely to be used more often than buffers which appear later
	 * in the list, the sort mustn't change the ordering of buffers
	 * with the same priority, i.e. it must be stable.
	 */
	list_add_tail(item, &b->bucket[min(priority, RADEON_CS_MAX_PRIORITY)]);
}
static void radeon_cs_buckets_get_list(struct radeon_cs_buckets *b,
				       struct list_head *out_list)
{
	unsigned i;

	/* Connect the sorted buckets in the output list. */
	for (i = 0; i < RADEON_CS_NUM_BUCKETS; i++) {
		list_splice(&b->bucket[i], out_list);
	}
}
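/**
 * radeon_cs_parser_relocs() - process the relocation chunk
 * @p:		parser structure holding parsing context.
 *
 * Look up the GEM object behind each relocation entry, pick the
 * preferred and allowed domains for it, sort all buffers by priority
 * into p->validated and validate them through TTM.
 */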
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
	struct drm_device *ddev = p->rdev->ddev;
	struct radeon_cs_chunk *chunk;
	struct radeon_cs_buckets buckets;
	unsigned i, j;
	bool duplicate, need_mmap_lock = false;
	int r;

	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	chunk = &p->chunks[p->chunk_relocs_idx];
	/* FIXME: we assume that each reloc uses 4 dwords */
	p->nrelocs = chunk->length_dw / 4;
	p->relocs_ptr = kcalloc(p->nrelocs, sizeof(void *), GFP_KERNEL);
	if (p->relocs_ptr == NULL) {
		return -ENOMEM;
	}
	p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}

	radeon_cs_buckets_init(&buckets);
	for (i = 0; i < p->nrelocs; i++) {
		struct drm_radeon_cs_reloc *r;
		unsigned priority;

		duplicate = false;
		r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
		for (j = 0; j < i; j++) {
			if (r->handle == p->relocs[j].handle) {
				p->relocs_ptr[i] = &p->relocs[j];
				duplicate = true;
				break;
			}
		}
		if (duplicate) {
			p->relocs[i].handle = 0;
			continue;
		}

		p->relocs[i].gobj = drm_gem_object_lookup(ddev, p->filp,
							  r->handle);
		if (p->relocs[i].gobj == NULL) {
			DRM_ERROR("gem object lookup failed 0x%x\n",
				  r->handle);
			return -ENOENT;
		}
		p->relocs_ptr[i] = &p->relocs[i];
		p->relocs[i].robj = gem_to_radeon_bo(p->relocs[i].gobj);
		/* The userspace buffer priorities are from 0 to 15. A higher
		 * number means the buffer is more important.
		 * Also, the buffers used for write have a higher priority than
		 * the buffers used for read only, which doubles the range
		 * to 0 to 31. 32 is reserved for the kernel driver.
		 */
		priority = (r->flags & RADEON_RELOC_PRIO_MASK) * 2
			   + !!r->write_domain;

		/* The first reloc of an UVD job is the msg and that must be in
		 * VRAM. Also put everything into VRAM on AGP cards and older
		 * IGP chips to avoid image corruptions.
		 */
		if (p->ring == R600_RING_TYPE_UVD_INDEX &&
		    (i == 0 || drm_pci_device_is_agp(p->rdev->ddev) ||
		     p->rdev->family == CHIP_RS780 ||
		     p->rdev->family == CHIP_RS880)) {

			/* TODO: is this still needed for NI+ ? */
			p->relocs[i].prefered_domains =
				RADEON_GEM_DOMAIN_VRAM;

			p->relocs[i].allowed_domains =
				RADEON_GEM_DOMAIN_VRAM;

			/* prioritize this over any other relocation */
			priority = RADEON_CS_MAX_PRIORITY;
		} else {
			uint32_t domain = r->write_domain ?
				r->write_domain : r->read_domains;

			if (domain & RADEON_GEM_DOMAIN_CPU) {
				DRM_ERROR("RADEON_GEM_DOMAIN_CPU is not valid "
					  "for command submission\n");
				return -EINVAL;
			}

			p->relocs[i].prefered_domains = domain;
			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain |= RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].allowed_domains = domain;
		}
		if (radeon_ttm_tt_has_userptr(p->relocs[i].robj->tbo.ttm)) {
			uint32_t domain = p->relocs[i].prefered_domains;
			if (!(domain & RADEON_GEM_DOMAIN_GTT)) {
				DRM_ERROR("Only RADEON_GEM_DOMAIN_GTT is "
					  "allowed for userptr BOs\n");
				return -EINVAL;
			}
			need_mmap_lock = true;
			domain = RADEON_GEM_DOMAIN_GTT;
			p->relocs[i].prefered_domains = domain;
			p->relocs[i].allowed_domains = domain;
		}

		p->relocs[i].tv.bo = &p->relocs[i].robj->tbo;
		p->relocs[i].handle = r->handle;

		radeon_cs_buckets_add(&buckets, &p->relocs[i].tv.head,
				      priority);
	}

	radeon_cs_buckets_get_list(&buckets, &p->validated);
	if (p->cs_flags & RADEON_CS_USE_VM)
		p->vm_bos = radeon_vm_get_bos(p->rdev, p->ib.vm,
					      &p->validated);
	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);

	r = radeon_bo_list_validate(p->rdev, &p->ticket, &p->validated, p->ring);

	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);

	return r;
}
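/**
 * radeon_cs_get_ring() - map a userspace ring id to a ring index
 * @p:		parser structure holding parsing context.
 * @ring:	ring id from the RADEON_CHUNK_ID_FLAGS chunk
 * @priority:	requested submission priority
 *
 * Translate the ring id supplied by userspace into a hardware ring
 * index, taking the chip family and the priority into account.
 */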
static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
{
	p->priority = priority;

	switch (ring) {
	default:
		DRM_ERROR("unknown ring id: %d\n", ring);
		return -EINVAL;
	case RADEON_CS_RING_GFX:
		p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_COMPUTE:
		if (p->rdev->family >= CHIP_TAHITI) {
			if (p->priority > 0)
				p->ring = CAYMAN_RING_TYPE_CP1_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_CP2_INDEX;
		} else
			p->ring = RADEON_RING_TYPE_GFX_INDEX;
		break;
	case RADEON_CS_RING_DMA:
		if (p->rdev->family >= CHIP_CAYMAN) {
			if (p->priority > 0)
				p->ring = R600_RING_TYPE_DMA_INDEX;
			else
				p->ring = CAYMAN_RING_TYPE_DMA1_INDEX;
		} else if (p->rdev->family >= CHIP_RV770) {
			p->ring = R600_RING_TYPE_DMA_INDEX;
		} else {
			return -EINVAL;
		}
		break;
	case RADEON_CS_RING_UVD:
		p->ring = R600_RING_TYPE_UVD_INDEX;
		break;
	case RADEON_CS_RING_VCE:
		/* TODO: only use the low priority ring for now */
		p->ring = TN_RING_TYPE_VCE1_INDEX;
		break;
	}
	return 0;
}
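/**
 * radeon_cs_sync_rings() - wait for the fences of all CS buffers
 * @p:		parser structure holding parsing context.
 *
 * Add a semaphore wait on the exclusive fence of every buffer in the
 * relocation list, so the IB is not executed before other rings are
 * done with those buffers.
 */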
static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
	int i;

	for (i = 0; i < p->nrelocs; i++) {
		struct reservation_object *resv;
		struct fence *fence;

		if (!p->relocs[i].robj)
			continue;

		resv = p->relocs[i].robj->tbo.resv;
		fence = reservation_object_get_excl(resv);

		radeon_semaphore_sync_to(p->ib.semaphore,
					 (struct radeon_fence *)fence);
	}
}
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	if (!cs->num_chunks) {
		return 0;
	}
	/* get chunks */
	INIT_LIST_HEAD(&p->validated);
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->ib.semaphore = NULL;
	p->const_ib.sa_bo = NULL;
	p->const_ib.semaphore = NULL;
	p->chunk_ib_idx = -1;
	p->chunk_relocs_idx = -1;
	p->chunk_flags_idx = -1;
	p->chunk_const_ib_idx = -1;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (copy_from_user(p->chunks_array, chunk_array_ptr,
			   sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user *)(unsigned long)p->chunks_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib_idx = i;
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib_idx = i;
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags_idx = i;
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
			continue;

		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
				continue;
		}

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		size *= sizeof(uint32_t);
		if (p->chunks[i].kdata == NULL) {
			return -ENOMEM;
		}
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			return -EFAULT;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->cs_flags = p->chunks[i].kdata[0];
			if (p->chunks[i].length_dw > 1)
				ring = p->chunks[i].kdata[1];
			if (p->chunks[i].length_dw > 2)
				priority = (s32)p->chunks[i].kdata[2];
		}
	}
	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;

		/* we only support VM on some SI+ rings */
		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
				DRM_ERROR("Ring %d requires VM!\n", p->ring);
				return -EINVAL;
			}
		} else {
			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
				DRM_ERROR("VM not supported on ring %d!\n",
					  p->ring);
				return -EINVAL;
			}
		}
	}

	return 0;
}
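/**
 * cmp_size_smaller_first() - list_sort() callback, smallest BO first
 * @priv:	unused
 * @a:		first validation entry
 * @b:		second validation entry
 *
 * Orders the validated list by buffer size in pages, ascending.
 */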
static int cmp_size_smaller_first(void *priv, struct list_head *a,
				  struct list_head *b)
{
	struct radeon_cs_reloc *la = list_entry(a, struct radeon_cs_reloc, tv.head);
	struct radeon_cs_reloc *lb = list_entry(b, struct radeon_cs_reloc, tv.head);

	/* Sort A before B if A is smaller. */
	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
}
/**
 * cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 * @backoff:	indicator to backoff the reservation
 *
 * If error is set, unvalidate the buffers; otherwise just free the memory
 * used by the parsing context.
 **/
static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bool backoff)
{
	unsigned i;

	if (!error) {
		/* Sort the buffer list from the smallest to largest buffer,
		 * which affects the order of buffers in the LRU list.
		 * This assures that the smallest buffers are added first
		 * to the LRU list, so they are likely to be later evicted
		 * first, instead of large buffers whose eviction is more
		 * expensive.
		 *
		 * This slightly lowers the number of bytes moved by TTM
		 * per frame under memory pressure.
		 */
		list_sort(NULL, &parser->validated, cmp_size_smaller_first);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    &parser->ib.fence->base);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}
	if (parser->relocs != NULL) {
		for (i = 0; i < parser->nrelocs; i++) {
			if (parser->relocs[i].gobj)
				drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
		}
	}
	kfree(parser->track);
	kfree(parser->relocs);
	kfree(parser->relocs_ptr);
	kfree(parser->vm_bos);
	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	kfree(parser->chunks_array);
	radeon_ib_free(parser->rdev, &parser->ib);
	radeon_ib_free(parser->rdev, &parser->const_ib);
}
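/**
 * radeon_cs_ib_chunk() - check and schedule a non-VM IB
 * @rdev:	radeon device
 * @parser:	parser structure holding parsing context.
 *
 * Run the per-ASIC command stream checker and, if it passes, schedule
 * the IB. Only used when the CS does not use a VM.
 */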
static int radeon_cs_ib_chunk(struct radeon_device *rdev,
			      struct radeon_cs_parser *parser)
{
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM)
		return 0;

	r = radeon_cs_parse(rdev, parser->ring, parser);
	if (r || parser->parser_error) {
		DRM_ERROR("Invalid command stream !\n");
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);
	else if ((parser->ring == TN_RING_TYPE_VCE1_INDEX) ||
		 (parser->ring == TN_RING_TYPE_VCE2_INDEX))
		radeon_vce_note_usage(rdev);

	radeon_cs_sync_rings(parser);
	r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	if (r) {
		DRM_ERROR("Failed to schedule IB !\n");
	}
	return r;
}
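/**
 * radeon_bo_vm_update_pte() - update the page tables for all CS BOs
 * @p:		parser structure holding parsing context.
 * @vm:		VM the command stream is executed in
 *
 * Update the page directory, the mapping of the temporary ring BO and
 * the page table entries of every buffer in the relocation list.
 */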
static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
				   struct radeon_vm *vm)
{
	struct radeon_device *rdev = p->rdev;
	struct radeon_bo_va *bo_va;
	int i, r;

	r = radeon_vm_update_page_directory(rdev, vm);
	if (r)
		return r;

	r = radeon_vm_clear_freed(rdev, vm);
	if (r)
		return r;

	if (vm->ib_bo_va == NULL) {
		DRM_ERROR("Tmp BO not in VM!\n");
		return -EINVAL;
	}

	r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
				&rdev->ring_tmp_bo.bo->tbo.mem);
	if (r)
		return r;

	for (i = 0; i < p->nrelocs; i++) {
		struct radeon_bo *bo;

		/* ignore duplicates */
		if (p->relocs_ptr[i] != &p->relocs[i])
			continue;

		bo = p->relocs[i].robj;
		bo_va = radeon_vm_bo_find(vm, bo);
		if (bo_va == NULL) {
			dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
			return -EINVAL;
		}

		r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
		if (r)
			return r;
	}

	return radeon_vm_clear_invalids(rdev, vm);
}
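/**
 * radeon_cs_ib_vm_chunk() - check and schedule a VM IB
 * @rdev:	radeon device
 * @parser:	parser structure holding parsing context.
 *
 * Run the per-ring IB checker on the IB (and the optional const IB),
 * update the VM page tables and schedule the IB. Only used when the
 * CS uses a VM.
 */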
static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
				 struct radeon_cs_parser *parser)
{
	struct radeon_fpriv *fpriv = parser->filp->driver_priv;
	struct radeon_vm *vm = &fpriv->vm;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;
	if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
		return 0;

	if (parser->const_ib.length_dw) {
		r = radeon_ring_ib_parse(rdev, parser->ring, &parser->const_ib);
		if (r) {
			return r;
		}
	}

	r = radeon_ring_ib_parse(rdev, parser->ring, &parser->ib);
	if (r) {
		return r;
	}

	if (parser->ring == R600_RING_TYPE_UVD_INDEX)
		radeon_uvd_note_usage(rdev);

	mutex_lock(&vm->mutex);
	r = radeon_bo_vm_update_pte(parser, vm);
	if (r) {
		goto out;
	}
	radeon_cs_sync_rings(parser);
	radeon_semaphore_sync_to(parser->ib.semaphore, vm->fence);

	if ((rdev->family >= CHIP_TAHITI) &&
	    (parser->chunk_const_ib_idx != -1)) {
		r = radeon_ib_schedule(rdev, &parser->ib, &parser->const_ib, true);
	} else {
		r = radeon_ib_schedule(rdev, &parser->ib, NULL, true);
	}

out:
	mutex_unlock(&vm->mutex);
	return r;
}
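/**
 * radeon_cs_handle_lockup() - turn a deadlock into a GPU reset
 * @rdev:	radeon device
 * @r:		error code from the CS path
 *
 * On -EDEADLK try a GPU reset and return -EAGAIN so userspace can
 * resubmit the CS; any other error is passed through unchanged.
 */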
static int radeon_cs_handle_lockup(struct radeon_device *rdev, int r)
{
	if (r == -EDEADLK) {
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
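/**
 * radeon_cs_ib_fill() - allocate the IBs and copy in the command stream
 * @rdev:	radeon device
 * @parser:	parser structure holding parsing context.
 *
 * Allocate the IB (and the const IB on SI+ when present), check the
 * size limits and copy the commands from userspace or from the
 * already cached kdata.
 */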
static int radeon_cs_ib_fill(struct radeon_device *rdev, struct radeon_cs_parser *parser)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_vm *vm = NULL;
	int r;

	if (parser->chunk_ib_idx == -1)
		return 0;

	if (parser->cs_flags & RADEON_CS_USE_VM) {
		struct radeon_fpriv *fpriv = parser->filp->driver_priv;
		vm = &fpriv->vm;

		if ((rdev->family >= CHIP_TAHITI) &&
		    (parser->chunk_const_ib_idx != -1)) {
			ib_chunk = &parser->chunks[parser->chunk_const_ib_idx];
			if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
				DRM_ERROR("cs IB CONST too big: %d\n", ib_chunk->length_dw);
				return -EINVAL;
			}
			r = radeon_ib_get(rdev, parser->ring, &parser->const_ib,
					  vm, ib_chunk->length_dw * 4);
			if (r) {
				DRM_ERROR("Failed to get const ib !\n");
				return r;
			}
			parser->const_ib.is_const_ib = true;
			parser->const_ib.length_dw = ib_chunk->length_dw;
			if (copy_from_user(parser->const_ib.ptr,
					   ib_chunk->user_ptr,
					   ib_chunk->length_dw * 4))
				return -EFAULT;
		}

		ib_chunk = &parser->chunks[parser->chunk_ib_idx];
		if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
			DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
			return -EINVAL;
		}
	}
	ib_chunk = &parser->chunks[parser->chunk_ib_idx];

	r = radeon_ib_get(rdev, parser->ring, &parser->ib,
			  vm, ib_chunk->length_dw * 4);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}
	parser->ib.length_dw = ib_chunk->length_dw;
	if (ib_chunk->kdata)
		memcpy(parser->ib.ptr, ib_chunk->kdata, ib_chunk->length_dw * 4);
	else if (copy_from_user(parser->ib.ptr, ib_chunk->user_ptr, ib_chunk->length_dw * 4))
		return -EFAULT;
	return 0;
}
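/**
 * radeon_cs_ioctl() - DRM_RADEON_CS ioctl entry point
 * @dev:	DRM device
 * @data:	ioctl argument (struct drm_radeon_cs)
 * @filp:	DRM file the CS is submitted on
 *
 * Top level CS path: initializes the parser, fills the IBs, processes
 * the relocations and hands the stream to the VM or non-VM submission
 * path.
 */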
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_cs_parser parser;
	int r;

	down_read(&rdev->exclusive_lock);
	if (!rdev->accel_working) {
		up_read(&rdev->exclusive_lock);
		return -EBUSY;
	}
	if (rdev->in_reset) {
		up_read(&rdev->exclusive_lock);
		r = radeon_gpu_reset(rdev);
		if (!r)
			r = -EAGAIN;
		return r;
	}
	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = rdev;
	parser.dev = rdev->dev;
	parser.family = rdev->family;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	r = radeon_cs_ib_fill(rdev, &parser);
	if (!r) {
		r = radeon_cs_parser_relocs(&parser);
		if (r && r != -ERESTARTSYS)
			DRM_ERROR("Failed to parse relocation %d!\n", r);
	}

	if (r) {
		radeon_cs_parser_fini(&parser, r, false);
		up_read(&rdev->exclusive_lock);
		r = radeon_cs_handle_lockup(rdev, r);
		return r;
	}

	trace_radeon_cs(&parser);

	r = radeon_cs_ib_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
	r = radeon_cs_ib_vm_chunk(rdev, &parser);
	if (r) {
		goto out;
	}
out:
	radeon_cs_parser_fini(&parser, r, true);
	up_read(&rdev->exclusive_lock);
	r = radeon_cs_handle_lockup(rdev, r);
	return r;
}
/**
 * radeon_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	index of the packet header in the ib chunk
 *
 * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the
 * packet is bigger than the remaining ib size, or if the packet is unknown.
 **/
int radeon_cs_packet_parse(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt,
			   unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	struct radeon_device *rdev = p->rdev;
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
	pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case RADEON_PACKET_TYPE0:
		if (rdev->family < CHIP_R600) {
			pkt->reg = R100_CP_PACKET0_GET_REG(header);
			pkt->one_reg_wr =
				RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
		} else
			pkt->reg = R600_CP_PACKET0_GET_REG(header);
		break;
	case RADEON_PACKET_TYPE3:
		pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
		break;
	case RADEON_PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
/**
 * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
 * @p:		structure holding the parser context.
 *
 * Check if the next packet is a NOP relocation packet3.
 **/
bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return false;
	if (p3reloc.type != RADEON_PACKET_TYPE3)
		return false;
	if (p3reloc.opcode != RADEON_PACKET3_NOP)
		return false;
	return true;
}
/**
 * radeon_cs_dump_packet() - dump raw packet context
 * @p:		structure holding the parser context.
 * @pkt:	structure holding the packet.
 *
 * Used mostly for debugging and error reporting.
 **/
void radeon_cs_dump_packet(struct radeon_cs_parser *p,
			   struct radeon_cs_packet *pkt)
{
	volatile uint32_t *ib;
	unsigned i;
	unsigned idx;

	ib = p->ib.ptr;
	idx = pkt->idx;
	for (i = 0; i <= (pkt->count + 1); i++, idx++)
		DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
}
/**
 * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where to store the found relocation entry
 * @nomm:	skip the memory-management reloc lookup (legacy UMS path)
 *
 * Check if the next packet is a relocation packet3 and compute the
 * GPU offset from the relocation chunk.
 **/
int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
				struct radeon_cs_reloc **cs_reloc,
				int nomm)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != RADEON_PACKET_TYPE3 ||
	    p3reloc.opcode != RADEON_PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		radeon_cs_dump_packet(p, &p3reloc);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	if (nomm) {
		*cs_reloc = p->relocs;
		(*cs_reloc)->gpu_offset =
			(u64)relocs_chunk->kdata[idx + 3] << 32;
		(*cs_reloc)->gpu_offset |= relocs_chunk->kdata[idx + 0];
	} else
		*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}