Commit 3fa116e8 authored by Linus Torvalds
Browse files
Pull powerpc fixes from Michael Ellerman:
 "A few powerpc fixes"

* tag 'powerpc-3.19-4' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux:
  powerpc: Work around gcc bug in current_thread_info()
  cxl: Fix issues when unmapping contexts
  powernv: Fix OPAL tracepoint code
parents f800c25b a87e810f
Loading
Loading
Loading
Loading
+7 −6
Original line number Diff line number Diff line
@@ -23,9 +23,9 @@
#define THREAD_SIZE		(1 << THREAD_SHIFT)

#ifdef CONFIG_PPC64
#define CURRENT_THREAD_INFO(dest, sp)	clrrdi dest, sp, THREAD_SHIFT
#define CURRENT_THREAD_INFO(dest, sp)	stringify_in_c(clrrdi dest, sp, THREAD_SHIFT)
#else
#define CURRENT_THREAD_INFO(dest, sp)	rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT
#define CURRENT_THREAD_INFO(dest, sp)	stringify_in_c(rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT)
#endif

#ifndef __ASSEMBLY__
@@ -71,12 +71,13 @@ struct thread_info {
#define THREAD_SIZE_ORDER	(THREAD_SHIFT - PAGE_SHIFT)

/* how to get the thread information struct from C */
register unsigned long __current_r1 asm("r1");
static inline struct thread_info *current_thread_info(void)
{
	/* gcc4, at least, is smart enough to turn this into a single
	 * rlwinm for ppc32 and clrrdi for ppc64 */
	return (struct thread_info *)(__current_r1 & ~(THREAD_SIZE-1));
	unsigned long val;

	asm (CURRENT_THREAD_INFO(%0,1) : "=r" (val));

	return (struct thread_info *)val;
}

#endif /* __ASSEMBLY__ */
+0 −1
Original line number Diff line number Diff line
@@ -40,7 +40,6 @@ BEGIN_FTR_SECTION; \
	b	1f;						\
END_FTR_SECTION(0, 1);						\
	ld	r12,opal_tracepoint_refcount@toc(r2);		\
	std	r12,32(r1);					\
	cmpdi	r12,0;						\
	bne-	LABEL;						\
1:
+63 −19
Original line number Diff line number Diff line
@@ -100,6 +100,46 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
	return 0;
}

/*
 * Page-fault handler for a context's problem state area mmap.
 *
 * Resolves the faulting page to a physical PFN inside either the AFU-wide
 * problem state area (dedicated mode) or this context's per-process area,
 * and installs it with vm_insert_pfn().  Faulting pages in on demand
 * (instead of mapping the whole range at mmap time) allows a detached
 * context's mappings to be torn down so later accesses fail with SIGBUS.
 *
 * Returns VM_FAULT_NOPAGE on success, VM_FAULT_SIGBUS if the offset is
 * out of range or the context is no longer STARTED.
 */
static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct cxl_context *ctx = vma->vm_file->private_data;
	unsigned long address = (unsigned long)vmf->virtual_address;
	u64 area, offset;

	/* Byte offset of the faulting page within the mapped region */
	offset = vmf->pgoff << PAGE_SHIFT;

	pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
			__func__, ctx->pe, address, offset);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		/* Dedicated mode: single AFU-wide problem state area */
		area = ctx->afu->psn_phys;
		/* NOTE(review): '>' allows offset == ps_size, which would map
		 * the first page past the area; '>=' looks intended — confirm */
		if (offset > ctx->afu->adapter->ps_size)
			return VM_FAULT_SIGBUS;
	} else {
		/* Directed modes: per-context problem state area */
		area = ctx->psn_phys;
		/* NOTE(review): same off-by-one concern as the branch above */
		if (offset > ctx->psn_size)
			return VM_FAULT_SIGBUS;
	}

	/* status_mutex orders this status check against context detach */
	mutex_lock(&ctx->status_mutex);

	if (ctx->status != STARTED) {
		mutex_unlock(&ctx->status_mutex);
		pr_devel("%s: Context not started, failing problem state access\n", __func__);
		return VM_FAULT_SIGBUS;
	}

	/* NOTE(review): vm_insert_pfn() return value is ignored — confirm a
	 * failed insert is acceptable here (fault would be retried or SIGBUS) */
	vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);

	mutex_unlock(&ctx->status_mutex);

	return VM_FAULT_NOPAGE;
}

/* vm_ops for problem state area mmaps: fault pages in on demand */
static const struct vm_operations_struct cxl_mmap_vmops = {
	.fault = cxl_mmap_fault,
};

/*
 * Map a per-context mmio space into the given vma.
 */
@@ -108,11 +148,7 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
	u64 len = vma->vm_end - vma->vm_start;
	len = min(len, ctx->psn_size);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size);
	}

	if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
		/* make sure there is a valid per process space for this AFU */
		if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
			pr_devel("AFU doesn't support mmio space\n");
@@ -122,12 +158,15 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
		/* Can't mmap until the AFU is enabled */
		if (!ctx->afu->enabled)
			return -EBUSY;
	}

	pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
		 ctx->psn_phys, ctx->pe , ctx->master);

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return vm_iomap_memory(vma, ctx->psn_phys, len);
	vma->vm_ops = &cxl_mmap_vmops;
	return 0;
}

/*
@@ -150,12 +189,6 @@ static void __detach_context(struct cxl_context *ctx)
	afu_release_irqs(ctx);
	flush_work(&ctx->fault_work); /* Only needed for dedicated process */
	wake_up_all(&ctx->wq);

	/* Release Problem State Area mapping */
	mutex_lock(&ctx->mapping_lock);
	if (ctx->mapping)
		unmap_mapping_range(ctx->mapping, 0, 0, 1);
	mutex_unlock(&ctx->mapping_lock);
}

/*
@@ -184,6 +217,17 @@ void cxl_context_detach_all(struct cxl_afu *afu)
		 * created and torn down after the IDR removed
		 */
		__detach_context(ctx);

		/*
		 * We are force detaching - remove any active PSA mappings so
		 * userspace cannot interfere with the card if it comes back.
		 * Easiest way to exercise this is to unbind and rebind the
		 * driver via sysfs while it is in use.
		 */
		mutex_lock(&ctx->mapping_lock);
		if (ctx->mapping)
			unmap_mapping_range(ctx->mapping, 0, 0, 1);
		mutex_unlock(&ctx->mapping_lock);
	}
	mutex_unlock(&afu->contexts_lock);
}
+8 −6
Original line number Diff line number Diff line
@@ -140,18 +140,20 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,

	pr_devel("%s: pe: %i\n", __func__, ctx->pe);

	mutex_lock(&ctx->status_mutex);
	if (ctx->status != OPENED) {
		rc = -EIO;
		goto out;
	}

	/* Do this outside the status_mutex to avoid a circular dependency with
	 * the locking in cxl_mmap_fault() */
	if (copy_from_user(&work, uwork,
			   sizeof(struct cxl_ioctl_start_work))) {
		rc = -EFAULT;
		goto out;
	}

	mutex_lock(&ctx->status_mutex);
	if (ctx->status != OPENED) {
		rc = -EIO;
		goto out;
	}

	/*
	 * if any of the reserved fields are set or any of the unused
	 * flags are set it's invalid