Commit 6b252cf4 authored by Danilo Krummrich's avatar Danilo Krummrich
Browse files

drm/nouveau: nvkm/vmm: implement raw ops to manage uvmm



The new VM_BIND UAPI uses the DRM GPU VA manager to manage the VA space.
Hence, we need a way to manipulate the MMU's page tables without going
through the internal range allocator implemented by nvkm/vmm.

This patch adds a raw interface for nvkm/vmm to pass the responsibility
for managing the address space and the corresponding map/unmap/sparse
operations to the upper layers.

Reviewed-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Danilo Krummrich <dakr@redhat.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230804182406.5222-11-dakr@redhat.com
parent 7576c4ca
Loading
Loading
Loading
Loading
+25 −1
Original line number Diff line number Diff line
@@ -3,7 +3,10 @@
struct nvif_vmm_v0 {
	__u8  version;
	__u8  page_nr;
	__u8  managed;
#define NVIF_VMM_V0_TYPE_UNMANAGED                                         0x00
#define NVIF_VMM_V0_TYPE_MANAGED                                           0x01
#define NVIF_VMM_V0_TYPE_RAW                                               0x02
	__u8  type;
	__u8  pad03[5];
	__u64 addr;
	__u64 size;
@@ -17,6 +20,7 @@ struct nvif_vmm_v0 {
#define NVIF_VMM_V0_UNMAP                                                  0x04
#define NVIF_VMM_V0_PFNMAP                                                 0x05
#define NVIF_VMM_V0_PFNCLR                                                 0x06
#define NVIF_VMM_V0_RAW                                                    0x07
#define NVIF_VMM_V0_MTHD(i)                                         ((i) + 0x80)

struct nvif_vmm_page_v0 {
@@ -66,6 +70,26 @@ struct nvif_vmm_unmap_v0 {
	__u64 addr;
};

/* Argument structure for the NVIF_VMM_V0_RAW method: 'op' selects one of
 * the raw page-table operations defined below; the remaining fields are a
 * union-of-needs across those operations (unused fields per-op are
 * presumably ignored — confirm against the nvkm/vmm implementation).
 */
struct nvif_vmm_raw_v0 {
	__u8 version;
#define NVIF_VMM_RAW_V0_GET	0x0
#define NVIF_VMM_RAW_V0_PUT	0x1
#define NVIF_VMM_RAW_V0_MAP	0x2
#define NVIF_VMM_RAW_V0_UNMAP	0x3
#define NVIF_VMM_RAW_V0_SPARSE	0x4
	__u8  op;     /* one of NVIF_VMM_RAW_V0_* above */
	__u8  sparse; /* NOTE(review): looks like a bool flag for sparse handling — confirm per-op semantics */
	__u8  ref;    /* NOTE(review): presumably ref (1) vs unref (0) for the SPARSE op — confirm */
	__u8  shift;  /* page-size shift for the affected range — assumed; TODO confirm */
	__u32 argc;   /* byte length of the blob pointed to by 'argv' */
	__u8  pad01[7];
	__u64 addr;   /* start of the virtual-address range to operate on */
	__u64 size;   /* byte size of the virtual-address range */
	__u64 offset; /* offset into the backing memory object — assumed for MAP; confirm */
	__u64 memory; /* NOTE(review): presumably a handle/identifier of the memory object to map — confirm */
	__u64 argv;   /* userspace pointer to backend-specific map arguments ('argc' bytes) — assumed; confirm */
};

struct nvif_vmm_pfnmap_v0 {
	__u8  version;
	__u8  page;
+17 −2
Original line number Diff line number Diff line
@@ -4,6 +4,12 @@
struct nvif_mem;
struct nvif_mmu;

/* Address-space management mode requested at nvif_vmm_ctor() time.
 * Values mirror the NVIF_VMM_V0_TYPE_UNMANAGED/MANAGED/RAW ABI constants
 * (0x00/0x01/0x02) in nvif/if000c.h by declaration order.
 */
enum nvif_vmm_type {
	UNMANAGED, /* nvkm/vmm owns VA allocation via its internal range allocator */
	MANAGED,   /* a caller-specified region is managed by the client (e.g. SVM) */
	RAW,       /* all VA-space management is delegated to the upper layers (VM_BIND) */
};

enum nvif_vmm_get {
	ADDR,
	PTES,
@@ -30,8 +36,9 @@ struct nvif_vmm {
	int page_nr;
};

int nvif_vmm_ctor(struct nvif_mmu *, const char *name, s32 oclass, bool managed,
		  u64 addr, u64 size, void *argv, u32 argc, struct nvif_vmm *);
int nvif_vmm_ctor(struct nvif_mmu *, const char *name, s32 oclass,
		  enum nvif_vmm_type, u64 addr, u64 size, void *argv, u32 argc,
		  struct nvif_vmm *);
void nvif_vmm_dtor(struct nvif_vmm *);
int nvif_vmm_get(struct nvif_vmm *, enum nvif_vmm_get, bool sparse,
		 u8 page, u8 align, u64 size, struct nvif_vma *);
@@ -39,4 +46,12 @@ void nvif_vmm_put(struct nvif_vmm *, struct nvif_vma *);
int nvif_vmm_map(struct nvif_vmm *, u64 addr, u64 size, void *argv, u32 argc,
		 struct nvif_mem *, u64 offset);
int nvif_vmm_unmap(struct nvif_vmm *, u64);

int nvif_vmm_raw_get(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift);
int nvif_vmm_raw_put(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift);
int nvif_vmm_raw_map(struct nvif_vmm *vmm, u64 addr, u64 size, u8 shift,
		     void *argv, u32 argc, struct nvif_mem *mem, u64 offset);
int nvif_vmm_raw_unmap(struct nvif_vmm *vmm, u64 addr, u64 size,
		       u8 shift, bool sparse);
int nvif_vmm_raw_sparse(struct nvif_vmm *vmm, u64 addr, u64 size, bool ref);
#endif
+19 −1
Original line number Diff line number Diff line
@@ -17,6 +17,7 @@ struct nvkm_vma {
	bool part:1; /* Region was split from an allocated region by map(). */
	bool busy:1; /* Region busy (for temporarily preventing user access). */
	bool mapped:1; /* Region contains valid pages. */
	bool no_comp:1; /* Force no memory compression. */
	struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
	struct nvkm_tags *tags; /* Compression tag reference. */
};
@@ -27,10 +28,26 @@ struct nvkm_vmm {
	const char *name;
	u32 debug;
	struct kref kref;
	struct mutex mutex;

	struct {
		struct mutex vmm;
		struct mutex ref;
		struct mutex map;
	} mutex;

	u64 start;
	u64 limit;
	struct {
		struct {
			u64 addr;
			u64 size;
		} p;
		struct {
			u64 addr;
			u64 size;
		} n;
		bool raw;
	} managed;

	struct nvkm_vmm_pt *pd;
	struct list_head join;
@@ -70,6 +87,7 @@ struct nvkm_vmm_map {

	const struct nvkm_vmm_page *page;

	bool no_comp;
	struct nvkm_tags *tags;
	u64 next;
	u64 type;
+1 −1
Original line number Diff line number Diff line
@@ -350,7 +350,7 @@ nouveau_svmm_init(struct drm_device *dev, void *data,
	 * VMM instead of the standard one.
	 */
	ret = nvif_vmm_ctor(&cli->mmu, "svmVmm",
			    cli->vmm.vmm.object.oclass, true,
			    cli->vmm.vmm.object.oclass, MANAGED,
			    args->unmanaged_addr, args->unmanaged_size,
			    &(struct gp100_vmm_v0) {
				.fault_replay = true,
+2 −2
Original line number Diff line number Diff line
@@ -128,8 +128,8 @@ nouveau_vmm_fini(struct nouveau_vmm *vmm)
int
nouveau_vmm_init(struct nouveau_cli *cli, s32 oclass, struct nouveau_vmm *vmm)
{
	int ret = nvif_vmm_ctor(&cli->mmu, "drmVmm", oclass, false, PAGE_SIZE,
				0, NULL, 0, &vmm->vmm);
	int ret = nvif_vmm_ctor(&cli->mmu, "drmVmm", oclass, UNMANAGED,
				PAGE_SIZE, 0, NULL, 0, &vmm->vmm);
	if (ret)
		return ret;

Loading