Commit f5245a5f authored by David Lechner, committed by Greg Kroah-Hartman
Browse files

counter: drop chrdev_lock



This removes the chrdev_lock from the counter subsystem. This was
intended to prevent opening the chrdev more than once. However, this
doesn't work in practice since userspace can duplicate file descriptors
and pass file descriptors to other processes. Since this protection
can't be relied on, it is best to just remove it.

Suggested-by: Greg KH <gregkh@linuxfoundation.org>
Acked-by: William Breathitt Gray <vilhelm.gray@gmail.com>
Signed-off-by: David Lechner <david@lechnology.com>
Link: https://lore.kernel.org/r/20211017185521.3468640-1-david@lechnology.com


Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent c3ed761c
Loading
Loading
Loading
Loading
+0 −6
Original line number Diff line number Diff line
@@ -384,10 +384,6 @@ static int counter_chrdev_open(struct inode *inode, struct file *filp)
							    typeof(*counter),
							    chrdev);

	/* Ensure chrdev is not opened more than 1 at a time */
	if (!atomic_add_unless(&counter->chrdev_lock, 1, 1))
		return -EBUSY;

	get_device(&counter->dev);
	filp->private_data = counter;

@@ -419,7 +415,6 @@ static int counter_chrdev_release(struct inode *inode, struct file *filp)
	mutex_unlock(&counter->ops_exist_lock);

	put_device(&counter->dev);
	atomic_dec(&counter->chrdev_lock);

	return ret;
}
@@ -445,7 +440,6 @@ int counter_chrdev_add(struct counter_device *const counter)
	mutex_init(&counter->events_lock);

	/* Initialize character device */
	atomic_set(&counter->chrdev_lock, 0);
	cdev_init(&counter->chrdev, &counter_fops);

	/* Allocate Counter events queue */
+3 −10
Original line number Diff line number Diff line
@@ -796,25 +796,18 @@ static int counter_events_queue_size_write(struct counter_device *counter,
					   u64 val)
{
	DECLARE_KFIFO_PTR(events, struct counter_event);
	int err = 0;

	/* Ensure chrdev is not opened more than 1 at a time */
	if (!atomic_add_unless(&counter->chrdev_lock, 1, 1))
		return -EBUSY;
	int err;

	/* Allocate new events queue */
	err = kfifo_alloc(&events, val, GFP_KERNEL);
	if (err)
		goto exit_early;
		return err;

	/* Swap in new events queue */
	kfifo_free(&counter->events);
	counter->events.kfifo = events.kfifo;

exit_early:
	atomic_dec(&counter->chrdev_lock);

	return err;
	return 0;
}

static struct counter_comp counter_num_signals_comp =
+0 −7
Original line number Diff line number Diff line
@@ -297,7 +297,6 @@ struct counter_ops {
 * @events:		queue of detected Counter events
 * @events_wait:	wait queue to allow blocking reads of Counter events
 * @events_lock:	lock to protect Counter events queue read operations
 * @chrdev_lock:	lock to limit chrdev to a single open at a time
 * @ops_exist_lock:	lock to prevent use during removal
 */
struct counter_device {
@@ -325,12 +324,6 @@ struct counter_device {
	DECLARE_KFIFO_PTR(events, struct counter_event);
	wait_queue_head_t events_wait;
	struct mutex events_lock;
	/*
	 * chrdev_lock is locked by counter_chrdev_open() and unlocked by
	 * counter_chrdev_release(), so a mutex is not possible here because
	 * chrdev_lock will invariably be held when returning to user space
	 */
	atomic_t chrdev_lock;
	struct mutex ops_exist_lock;
};