drivers/gpu/drm/radeon/cik_sdma.c  (+122 −67)

@@ -749,37 +749,28 @@ bool cik_sdma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
}

/**
- * cik_sdma_vm_set_page - update the page tables using sDMA
+ * cik_sdma_vm_copy_pages - update PTEs by copying them from the GART
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @src: src addr to copy from
 * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: access flags
 *
- * Update the page tables using sDMA (CIK).
+ * Update PTEs by copying them from the GART using sDMA (CIK).
 */
-void cik_sdma_vm_set_page(struct radeon_device *rdev,
-                          struct radeon_ib *ib,
-                          uint64_t pe,
-                          uint64_t addr, unsigned count,
-                          uint32_t incr, uint32_t flags)
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+                            struct radeon_ib *ib,
+                            uint64_t pe, uint64_t src, unsigned count)
{
-    uint64_t value;
-    unsigned ndw;
-
-    trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
-    if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
-        uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
    while (count) {
        unsigned bytes = count * 8;
        if (bytes > 0x1FFFF8)
            bytes = 0x1FFFF8;

-            ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_COPY, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
        ib->ptr[ib->length_dw++] = bytes;
        ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
        ib->ptr[ib->length_dw++] = lower_32_bits(src);
@@ -791,27 +782,80 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
        src += bytes;
        count -= bytes / 8;
    }
-    } else if (flags & R600_PTE_SYSTEM) {
+}
+
+/**
+ * cik_sdma_vm_write_pages - update PTEs by writing them manually
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update PTEs by writing them manually using sDMA (CIK).
+ */
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
+                             struct radeon_ib *ib,
+                             uint64_t pe,
+                             uint64_t addr, unsigned count,
+                             uint32_t incr, uint32_t flags)
+{
+    uint64_t value;
+    unsigned ndw;
+
    while (count) {
        ndw = count * 2;
        if (ndw > 0xFFFFE)
            ndw = 0xFFFFE;

        /* for non-physically contiguous pages (system) */
-            ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
+        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
        ib->ptr[ib->length_dw++] = pe;
        ib->ptr[ib->length_dw++] = upper_32_bits(pe);
        ib->ptr[ib->length_dw++] = ndw;
        for (; ndw > 0; ndw -= 2, --count, pe += 8) {
            if (flags & R600_PTE_SYSTEM) {
                value = radeon_vm_map_gart(rdev, addr);
                value &= 0xFFFFFFFFFFFFF000ULL;
            } else if (flags & R600_PTE_VALID) {
                value = addr;
            } else {
                value = 0;
            }
            addr += incr;
            value |= flags;
            ib->ptr[ib->length_dw++] = value;
            ib->ptr[ib->length_dw++] = upper_32_bits(value);
        }
    }
-    } else {
+}
+
+/**
+ * cik_sdma_vm_set_pages - update the page tables using sDMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: access flags
+ *
+ * Update the page tables using sDMA (CIK).
+ */
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
+                           struct radeon_ib *ib,
+                           uint64_t pe,
+                           uint64_t addr, unsigned count,
+                           uint32_t incr, uint32_t flags)
+{
+    uint64_t value;
+    unsigned ndw;
+
    while (count) {
        ndw = count;
        if (ndw > 0x7FFFF)
@@ -821,6 +865,7 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
            value = addr;
        else
            value = 0;
+
        /* for physically contiguous pages (vram) */
        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_GENERATE_PTE_PDE, 0, 0);
        ib->ptr[ib->length_dw++] = pe; /* dst addr */
@@ -832,11 +877,21 @@ void cik_sdma_vm_set_page(struct radeon_device *rdev,
        ib->ptr[ib->length_dw++] = incr; /* increment size */
        ib->ptr[ib->length_dw++] = 0;
        ib->ptr[ib->length_dw++] = ndw; /* number of entries */
+
        pe += ndw * 8;
        addr += ndw * incr;
        count -= ndw;
    }
}
+
+/**
+ * cik_sdma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ */
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
+{
+    while (ib->length_dw & 0x7)
+        ib->ptr[ib->length_dw++] = SDMA_PACKET(SDMA_OPCODE_NOP, 0, 0);
+}
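Taken together, the three new functions cover exactly the branches of the removed cik_sdma_vm_set_page; choosing between them, and computing the GART source address, is now the caller's job. A minimal sketch of that dispatch, reusing the logic deleted above (example_cik_update_ptes is a hypothetical name, not part of the patch):

/* Hypothetical caller-side dispatch mirroring the if/else that was
 * removed from cik_sdma_vm_set_page; illustration only. */
static void example_cik_update_ptes(struct radeon_device *rdev,
                                    struct radeon_ib *ib,
                                    uint64_t pe,
                                    uint64_t addr, unsigned count,
                                    uint32_t incr, uint32_t flags)
{
    if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
        /* PTEs already live in the GART table: copy them over */
        uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;

        cik_sdma_vm_copy_pages(rdev, ib, pe, src, count);
    } else if (flags & R600_PTE_SYSTEM) {
        /* scattered system pages: write each PTE individually */
        cik_sdma_vm_write_pages(rdev, ib, pe, addr, count, incr, flags);
    } else {
        /* contiguous VRAM: let GENERATE_PTE_PDE build the entries */
        cik_sdma_vm_set_pages(rdev, ib, pe, addr, count, incr, flags);
    }
}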
drivers/gpu/drm/radeon/ni_dma.c  (+123 −74)

@@ -307,31 +307,23 @@ bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
}

/**
- * cayman_dma_vm_set_page - update the page tables using the DMA
+ * cayman_dma_vm_copy_pages - update PTEs by copying them from the GART
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
- * @addr: dst addr to write into pe
+ * @src: src addr where to copy from
 * @count: number of page entries to update
- * @incr: increase next addr by incr bytes
- * @flags: hw access flags
 *
- * Update the page tables using the DMA (cayman/TN).
+ * Update PTEs by copying them from the GART using the DMA (cayman/TN).
 */
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
-                            struct radeon_ib *ib,
-                            uint64_t pe,
-                            uint64_t addr, unsigned count,
-                            uint32_t incr, uint32_t flags)
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+                              struct radeon_ib *ib,
+                              uint64_t pe, uint64_t src, unsigned count)
{
-    uint64_t value;
    unsigned ndw;

-    trace_radeon_vm_set_page(pe, addr, count, incr, flags);
-
-    if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
-        uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
-
    while (count) {
        ndw = count * 2;
        if (ndw > 0xFFFFE)
@@ -348,15 +340,38 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
        src += ndw * 4;
        count -= ndw / 2;
    }
+}
+
+/**
+ * cayman_dma_vm_write_pages - update PTEs by writing them manually
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Update PTEs by writing them manually using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+                               struct radeon_ib *ib,
+                               uint64_t pe,
+                               uint64_t addr, unsigned count,
+                               uint32_t incr, uint32_t flags)
+{
+    uint64_t value;
+    unsigned ndw;

-    } else if ((flags & R600_PTE_SYSTEM) || (count == 1)) {
    while (count) {
        ndw = count * 2;
        if (ndw > 0xFFFFE)
            ndw = 0xFFFFE;

        /* for non-physically contiguous pages (system) */
-            ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
+        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, ndw);
        ib->ptr[ib->length_dw++] = pe;
        ib->ptr[ib->length_dw++] = upper_32_bits(pe) & 0xff;
        for (; ndw > 0; ndw -= 2, --count, pe += 8) {
@@ -374,7 +389,30 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
            ib->ptr[ib->length_dw++] = upper_32_bits(value);
        }
    }
-    } else {
+}
+
+/**
+ * cayman_dma_vm_set_pages - update the page tables using the DMA
+ *
+ * @rdev: radeon_device pointer
+ * @ib: indirect buffer to fill with commands
+ * @pe: addr of the page entry
+ * @addr: dst addr to write into pe
+ * @count: number of page entries to update
+ * @incr: increase next addr by incr bytes
+ * @flags: hw access flags
+ *
+ * Update the page tables using the DMA (cayman/TN).
+ */
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+                             struct radeon_ib *ib,
+                             uint64_t pe,
+                             uint64_t addr, unsigned count,
+                             uint32_t incr, uint32_t flags)
+{
+    uint64_t value;
+    unsigned ndw;
+
    while (count) {
        ndw = count * 2;
        if (ndw > 0xFFFFE)
@@ -384,6 +422,7 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
            value = addr;
        else
            value = 0;
+
        /* for physically contiguous pages (vram) */
        ib->ptr[ib->length_dw++] = DMA_PTE_PDE_PACKET(ndw);
        ib->ptr[ib->length_dw++] = pe; /* dst addr */
@@ -394,11 +433,21 @@ void cayman_dma_vm_set_page(struct radeon_device *rdev,
        ib->ptr[ib->length_dw++] = upper_32_bits(value);
        ib->ptr[ib->length_dw++] = incr; /* increment size */
        ib->ptr[ib->length_dw++] = 0;
+
        pe += ndw * 4;
        addr += (ndw / 2) * incr;
        count -= ndw / 2;
    }
}
+
+/**
+ * cayman_dma_vm_pad_ib - pad the IB to the required number of dw
+ *
+ * @ib: indirect buffer to fill with padding
+ *
+ */
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib)
+{
+    while (ib->length_dw & 0x7)
+        ib->ptr[ib->length_dw++] = DMA_PACKET(DMA_PACKET_NOP, 0, 0, 0);
+}
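For orientation, the clamps in the loops above bound how many PTEs one packet can carry. The following defines restate that arithmetic; the EXAMPLE_ names are illustrative only and do not exist in the driver:

/* Per-packet PTE capacity implied by the clamps above (illustration).
 * CIK copy: at most 0x1FFFF8 bytes per packet, 8 bytes per PTE.
 * CIK/cayman write: at most 0xFFFFE dwords per packet, 2 dwords per PTE.
 * CIK set: GENERATE_PTE_PDE takes the entry count directly. */
#define EXAMPLE_CIK_COPY_MAX_PTES   (0x1FFFF8 / 8)  /* 262143 */
#define EXAMPLE_DMA_WRITE_MAX_PTES  (0xFFFFE / 2)   /* 524287 */
#define EXAMPLE_CIK_SET_MAX_PTES    0x7FFFF         /* 524287 */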
drivers/gpu/drm/radeon/radeon.h  (+19 −6)

@@ -1797,11 +1797,21 @@ struct radeon_asic {
    struct {
        int (*init)(struct radeon_device *rdev);
        void (*fini)(struct radeon_device *rdev);
-        void (*set_page)(struct radeon_device *rdev,
-                         struct radeon_ib *ib,
-                         uint64_t pe,
-                         uint64_t addr, unsigned count,
-                         uint32_t incr, uint32_t flags);
+        void (*copy_pages)(struct radeon_device *rdev,
+                           struct radeon_ib *ib,
+                           uint64_t pe, uint64_t src,
+                           unsigned count);
+        void (*write_pages)(struct radeon_device *rdev,
+                            struct radeon_ib *ib,
+                            uint64_t pe,
+                            uint64_t addr, unsigned count,
+                            uint32_t incr, uint32_t flags);
+        void (*set_pages)(struct radeon_device *rdev,
+                          struct radeon_ib *ib,
+                          uint64_t pe,
+                          uint64_t addr, unsigned count,
+                          uint32_t incr, uint32_t flags);
+        void (*pad_ib)(struct radeon_ib *ib);
    } vm;
    /* ring specific callbacks */
    struct radeon_asic_ring *ring[RADEON_NUM_RINGS];
@@ -2761,7 +2771,10 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
-#define radeon_asic_vm_set_page(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_page((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
+#define radeon_asic_vm_write_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.write_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_set_pages(rdev, ib, pe, addr, count, incr, flags) ((rdev)->asic->vm.set_pages((rdev), (ib), (pe), (addr), (count), (incr), (flags)))
+#define radeon_asic_vm_pad_ib(rdev, ib) ((rdev)->asic->vm.pad_ib((ib)))
#define radeon_ring_start(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_start((rdev), (cp))
#define radeon_ring_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ring_test((rdev), (cp))
#define radeon_ib_test(rdev, r, cp) (rdev)->asic->ring[(r)]->ib_test((rdev), (cp))
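With the macros in place, generic VM code can compose the primitives without knowing which ASIC it is driving, and every ASIC now supplies a pad_ib so tail alignment is handled in one place. A hedged sketch under those assumptions (example_emit_and_pad is a hypothetical helper; the real callers in radeon_vm.c are not shown in this diff):

/* Hypothetical ASIC-agnostic helper built on the new macros;
 * illustration only, not part of this patch. */
static void example_emit_and_pad(struct radeon_device *rdev,
                                 struct radeon_ib *ib,
                                 uint64_t pe,
                                 uint64_t addr, unsigned count,
                                 uint32_t incr, uint32_t flags)
{
    if (flags & R600_PTE_SYSTEM)
        radeon_asic_vm_write_pages(rdev, ib, pe, addr, count,
                                   incr, flags);
    else
        radeon_asic_vm_set_pages(rdev, ib, pe, addr, count,
                                 incr, flags);

    /* NOP-pad length_dw up to a multiple of 8 dwords */
    radeon_asic_vm_pad_ib(rdev, ib);
}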
drivers/gpu/drm/radeon/radeon_asic.c  (+20 −5)

@@ -1613,7 +1613,10 @@ static struct radeon_asic cayman_asic = {
    .vm = {
        .init = &cayman_vm_init,
        .fini = &cayman_vm_fini,
-        .set_page = &cayman_dma_vm_set_page,
+        .copy_pages = &cayman_dma_vm_copy_pages,
+        .write_pages = &cayman_dma_vm_write_pages,
+        .set_pages = &cayman_dma_vm_set_pages,
+        .pad_ib = &cayman_dma_vm_pad_ib,
    },
    .ring = {
        [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1713,7 +1716,10 @@ static struct radeon_asic trinity_asic = {
    .vm = {
        .init = &cayman_vm_init,
        .fini = &cayman_vm_fini,
-        .set_page = &cayman_dma_vm_set_page,
+        .copy_pages = &cayman_dma_vm_copy_pages,
+        .write_pages = &cayman_dma_vm_write_pages,
+        .set_pages = &cayman_dma_vm_set_pages,
+        .pad_ib = &cayman_dma_vm_pad_ib,
    },
    .ring = {
        [RADEON_RING_TYPE_GFX_INDEX] = &cayman_gfx_ring,
@@ -1843,7 +1849,10 @@ static struct radeon_asic si_asic = {
    .vm = {
        .init = &si_vm_init,
        .fini = &si_vm_fini,
-        .set_page = &si_dma_vm_set_page,
+        .copy_pages = &si_dma_vm_copy_pages,
+        .write_pages = &si_dma_vm_write_pages,
+        .set_pages = &si_dma_vm_set_pages,
+        .pad_ib = &cayman_dma_vm_pad_ib,
    },
    .ring = {
        [RADEON_RING_TYPE_GFX_INDEX] = &si_gfx_ring,
@@ -2001,7 +2010,10 @@ static struct radeon_asic ci_asic = {
    .vm = {
        .init = &cik_vm_init,
        .fini = &cik_vm_fini,
-        .set_page = &cik_sdma_vm_set_page,
+        .copy_pages = &cik_sdma_vm_copy_pages,
+        .write_pages = &cik_sdma_vm_write_pages,
+        .set_pages = &cik_sdma_vm_set_pages,
+        .pad_ib = &cik_sdma_vm_pad_ib,
    },
    .ring = {
        [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
@@ -2105,7 +2117,10 @@ static struct radeon_asic kv_asic = {
    .vm = {
        .init = &cik_vm_init,
        .fini = &cik_vm_fini,
-        .set_page = &cik_sdma_vm_set_page,
+        .copy_pages = &cik_sdma_vm_copy_pages,
+        .write_pages = &cik_sdma_vm_write_pages,
+        .set_pages = &cik_sdma_vm_set_pages,
+        .pad_ib = &cik_sdma_vm_pad_ib,
    },
    .ring = {
        [RADEON_RING_TYPE_GFX_INDEX] = &ci_gfx_ring,
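Note the asymmetry in the SI wiring above: si_asic keeps the si_dma_vm_* callbacks for copy, write, and set, but points .pad_ib at cayman_dma_vm_pad_ib, presumably because SI's DMA engine accepts the same DMA_PACKET_NOP as cayman/TN, so no SI-specific padding helper is needed. CIK's sDMA uses a different packet format and therefore gets its own cik_sdma_vm_pad_ib.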
drivers/gpu/drm/radeon/radeon_asic.h  (+49 −15)

@@ -607,11 +607,22 @@ void cayman_dma_ring_ib_execute(struct radeon_device *rdev,
                                struct radeon_ib *ib);
bool cayman_gfx_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
bool cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring);
-void cayman_dma_vm_set_page(struct radeon_device *rdev,
-                            struct radeon_ib *ib,
-                            uint64_t pe,
-                            uint64_t addr, unsigned count,
-                            uint32_t incr, uint32_t flags);
+void cayman_dma_vm_copy_pages(struct radeon_device *rdev,
+                              struct radeon_ib *ib,
+                              uint64_t pe, uint64_t src,
+                              unsigned count);
+void cayman_dma_vm_write_pages(struct radeon_device *rdev,
+                               struct radeon_ib *ib,
+                               uint64_t pe,
+                               uint64_t addr, unsigned count,
+                               uint32_t incr, uint32_t flags);
+void cayman_dma_vm_set_pages(struct radeon_device *rdev,
+                             struct radeon_ib *ib,
+                             uint64_t pe,
+                             uint64_t addr, unsigned count,
+                             uint32_t incr, uint32_t flags);
+void cayman_dma_vm_pad_ib(struct radeon_ib *ib);

void cayman_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
@@ -694,11 +705,22 @@ int si_copy_dma(struct radeon_device *rdev,
                uint64_t src_offset, uint64_t dst_offset,
                unsigned num_gpu_pages,
                struct radeon_fence **fence);
-void si_dma_vm_set_page(struct radeon_device *rdev,
-                        struct radeon_ib *ib,
-                        uint64_t pe,
-                        uint64_t addr, unsigned count,
-                        uint32_t incr, uint32_t flags);
+void si_dma_vm_copy_pages(struct radeon_device *rdev,
+                          struct radeon_ib *ib,
+                          uint64_t pe, uint64_t src,
+                          unsigned count);
+void si_dma_vm_write_pages(struct radeon_device *rdev,
+                           struct radeon_ib *ib,
+                           uint64_t pe,
+                           uint64_t addr, unsigned count,
+                           uint32_t incr, uint32_t flags);
+void si_dma_vm_set_pages(struct radeon_device *rdev,
+                         struct radeon_ib *ib,
+                         uint64_t pe,
+                         uint64_t addr, unsigned count,
+                         uint32_t incr, uint32_t flags);
+
void si_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
u32 si_get_xclk(struct radeon_device *rdev);
uint64_t si_get_gpu_clock_counter(struct radeon_device *rdev);
@@ -772,11 +794,23 @@ int cik_irq_process(struct radeon_device *rdev);
int cik_vm_init(struct radeon_device *rdev);
void cik_vm_fini(struct radeon_device *rdev);
void cik_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
-void cik_sdma_vm_set_page(struct radeon_device *rdev,
-                          struct radeon_ib *ib,
-                          uint64_t pe,
-                          uint64_t addr, unsigned count,
-                          uint32_t incr, uint32_t flags);
+void cik_sdma_vm_copy_pages(struct radeon_device *rdev,
+                            struct radeon_ib *ib,
+                            uint64_t pe, uint64_t src,
+                            unsigned count);
+void cik_sdma_vm_write_pages(struct radeon_device *rdev,
+                             struct radeon_ib *ib,
+                             uint64_t pe,
+                             uint64_t addr, unsigned count,
+                             uint32_t incr, uint32_t flags);
+void cik_sdma_vm_set_pages(struct radeon_device *rdev,
+                           struct radeon_ib *ib,
+                           uint64_t pe,
+                           uint64_t addr, unsigned count,
+                           uint32_t incr, uint32_t flags);
+void cik_sdma_vm_pad_ib(struct radeon_ib *ib);
+
void cik_dma_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm);
int cik_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
u32 cik_gfx_get_rptr(struct radeon_device *rdev,