Commit 97971df8 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull dlm updates from David Teigland:
 "These patches include the usual cleanups and minor fixes, the removal
  of code that is no longer needed due to recent improvements, and
  improvements to processing large volumes of messages during heavy
  locking activity.

  Summary:

   - Misc code cleanup

   - Fix a couple of socket handling bugs: a double release on an error
     path and a data-ready race in an accept loop

   - Remove code for resending dir-remove messages. This code is no
     longer needed since the midcomms layer now ensures the messages are
     resent if needed

   - Add tracepoints for dlm messages

   - Improve callback queueing by replacing the fixed array with a list

   - Simplify the handling of a remove message followed by a lookup
     message by sending both without releasing a spinlock in between

   - Improve the concurrency of sending and receiving messages by
     holding locks for a shorter time, and changing how workqueues are
     used

   - Remove old code for shutting down sockets, which is no longer
     needed with the reliable connection handling that was recently
     added"

* tag 'dlm-6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/teigland/linux-dlm: (37 commits)
  fs: dlm: fix building without lockdep
  fs: dlm: parallelize lowcomms socket handling
  fs: dlm: don't init error value
  fs: dlm: use saved sk_error_report()
  fs: dlm: use sock2con without checking null
  fs: dlm: remove dlm_node_addrs lookup list
  fs: dlm: don't put dlm_local_addrs on heap
  fs: dlm: cleanup listen sock handling
  fs: dlm: remove socket shutdown handling
  fs: dlm: use listen sock as dlm running indicator
  fs: dlm: use list_first_entry_or_null
  fs: dlm: remove twice INIT_WORK
  fs: dlm: add midcomms init/start functions
  fs: dlm: add dst nodeid for msg tracing
  fs: dlm: rename seq to h_seq for msg tracing
  fs: dlm: rename DLM_IFL_NEED_SCHED to DLM_IFL_CB_PENDING
  fs: dlm: ast do WARN_ON_ONCE() on hotpath
  fs: dlm: drop lkb ref in bug case
  fs: dlm: avoid false-positive checker warning
  fs: dlm: use WARN_ON_ONCE() instead of WARN_ON()
  ...
parents 56c003e4 7a5e9f1f
Loading
Loading
Loading
Loading
+137 −185
Original line number Diff line number Diff line
@@ -12,55 +12,67 @@
#include <trace/events/dlm.h>

#include "dlm_internal.h"
#include "memory.h"
#include "lock.h"
#include "user.h"
#include "ast.h"

static uint64_t dlm_cb_seq;
static DEFINE_SPINLOCK(dlm_cb_seq_spin);
/* kref release handler: frees the dlm_callback once its last reference
 * is dropped (invoked via kref_put()).
 */
void dlm_release_callback(struct kref *ref)
{
	dlm_free_cb(container_of(ref, struct dlm_callback, ref));
}

static void dlm_dump_lkb_callbacks(struct dlm_lkb *lkb)
void dlm_callback_set_last_ptr(struct dlm_callback **from,
			       struct dlm_callback *to)
{
	int i;

	log_print("last_bast %x %llu flags %x mode %d sb %d %x",
		  lkb->lkb_id,
		  (unsigned long long)lkb->lkb_last_bast.seq,
		  lkb->lkb_last_bast.flags,
		  lkb->lkb_last_bast.mode,
		  lkb->lkb_last_bast.sb_status,
		  lkb->lkb_last_bast.sb_flags);

	log_print("last_cast %x %llu flags %x mode %d sb %d %x",
		  lkb->lkb_id,
		  (unsigned long long)lkb->lkb_last_cast.seq,
		  lkb->lkb_last_cast.flags,
		  lkb->lkb_last_cast.mode,
		  lkb->lkb_last_cast.sb_status,
		  lkb->lkb_last_cast.sb_flags);

	for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
		log_print("cb %x %llu flags %x mode %d sb %d %x",
			  lkb->lkb_id,
			  (unsigned long long)lkb->lkb_callbacks[i].seq,
			  lkb->lkb_callbacks[i].flags,
			  lkb->lkb_callbacks[i].mode,
			  lkb->lkb_callbacks[i].sb_status,
			  lkb->lkb_callbacks[i].sb_flags);
	if (*from)
		kref_put(&(*from)->ref, dlm_release_callback);

	if (to)
		kref_get(&to->ref);

	*from = to;
}

void dlm_purge_lkb_callbacks(struct dlm_lkb *lkb)
{
	struct dlm_callback *cb, *safe;

	list_for_each_entry_safe(cb, safe, &lkb->lkb_callbacks, list) {
		list_del(&cb->list);
		kref_put(&cb->ref, dlm_release_callback);
	}

int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
			 int status, uint32_t sbflags, uint64_t seq)
	lkb->lkb_flags &= ~DLM_IFL_CB_PENDING;

	/* invalidate */
	dlm_callback_set_last_ptr(&lkb->lkb_last_cast, NULL);
	dlm_callback_set_last_ptr(&lkb->lkb_last_cb, NULL);
	lkb->lkb_last_bast_mode = -1;
}

int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
			     int status, uint32_t sbflags)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	uint64_t prev_seq;
	int rv = DLM_ENQUEUE_CALLBACK_SUCCESS;
	struct dlm_callback *cb;
	int prev_mode;
	int i, rv;

	for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
		if (lkb->lkb_callbacks[i].seq)
			continue;
	if (flags & DLM_CB_BAST) {
		/* if cb is a bast, it should be skipped if the blocking mode is
		 * compatible with the last granted mode
		 */
		if (lkb->lkb_last_cast) {
			if (dlm_modes_compat(mode, lkb->lkb_last_cast->mode)) {
				log_debug(ls, "skip %x bast mode %d for cast mode %d",
					  lkb->lkb_id, mode,
					  lkb->lkb_last_cast->mode);
				goto out;
			}
		}

		/*
		 * Suppress some redundant basts here, do more on removal.
@@ -68,148 +80,95 @@ int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
		 * is a bast for the same mode or a more restrictive mode.
		 * (the additional > PR check is needed for PR/CW inversion)
		 */

		if ((i > 0) && (flags & DLM_CB_BAST) &&
		    (lkb->lkb_callbacks[i-1].flags & DLM_CB_BAST)) {

			prev_seq = lkb->lkb_callbacks[i-1].seq;
			prev_mode = lkb->lkb_callbacks[i-1].mode;
		if (lkb->lkb_last_cb && lkb->lkb_last_cb->flags & DLM_CB_BAST) {
			prev_mode = lkb->lkb_last_cb->mode;

			if ((prev_mode == mode) ||
			    (prev_mode > mode && prev_mode > DLM_LOCK_PR)) {

				log_debug(ls, "skip %x add bast %llu mode %d "
					  "for bast %llu mode %d",
					  lkb->lkb_id,
					  (unsigned long long)seq,
					  mode,
					  (unsigned long long)prev_seq,
					  prev_mode);
				rv = 0;
				log_debug(ls, "skip %x add bast mode %d for bast mode %d",
					  lkb->lkb_id, mode, prev_mode);
				goto out;
			}
		}

		lkb->lkb_callbacks[i].seq = seq;
		lkb->lkb_callbacks[i].flags = flags;
		lkb->lkb_callbacks[i].mode = mode;
		lkb->lkb_callbacks[i].sb_status = status;
		lkb->lkb_callbacks[i].sb_flags = (sbflags & 0x000000FF);
		rv = 0;
		break;
	}

	if (i == DLM_CALLBACKS_SIZE) {
		log_error(ls, "no callbacks %x %llu flags %x mode %d sb %d %x",
			  lkb->lkb_id, (unsigned long long)seq,
			  flags, mode, status, sbflags);
		dlm_dump_lkb_callbacks(lkb);
		rv = -1;
	cb = dlm_allocate_cb();
	if (!cb) {
		rv = DLM_ENQUEUE_CALLBACK_FAILURE;
		goto out;
	}
 out:
	return rv;
}

int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb,
			 struct dlm_callback *cb, int *resid)
{
	int i, rv;

	*resid = 0;

	if (!lkb->lkb_callbacks[0].seq) {
		rv = -ENOENT;
		goto out;
	cb->flags = flags;
	cb->mode = mode;
	cb->sb_status = status;
	cb->sb_flags = (sbflags & 0x000000FF);
	kref_init(&cb->ref);
	if (!(lkb->lkb_flags & DLM_IFL_CB_PENDING)) {
		lkb->lkb_flags |= DLM_IFL_CB_PENDING;
		rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED;
	}
	list_add_tail(&cb->list, &lkb->lkb_callbacks);

	/* oldest undelivered cb is callbacks[0] */
	if (flags & DLM_CB_CAST)
		dlm_callback_set_last_ptr(&lkb->lkb_last_cast, cb);

	memcpy(cb, &lkb->lkb_callbacks[0], sizeof(struct dlm_callback));
	memset(&lkb->lkb_callbacks[0], 0, sizeof(struct dlm_callback));
	dlm_callback_set_last_ptr(&lkb->lkb_last_cb, cb);

	/* shift others down */

	for (i = 1; i < DLM_CALLBACKS_SIZE; i++) {
		if (!lkb->lkb_callbacks[i].seq)
			break;
		memcpy(&lkb->lkb_callbacks[i-1], &lkb->lkb_callbacks[i],
		       sizeof(struct dlm_callback));
		memset(&lkb->lkb_callbacks[i], 0, sizeof(struct dlm_callback));
		(*resid)++;
 out:
	return rv;
}

	/* if cb is a bast, it should be skipped if the blocking mode is
	   compatible with the last granted mode */

	if ((cb->flags & DLM_CB_BAST) && lkb->lkb_last_cast.seq) {
		if (dlm_modes_compat(cb->mode, lkb->lkb_last_cast.mode)) {
			cb->flags |= DLM_CB_SKIP;

			log_debug(ls, "skip %x bast %llu mode %d "
				  "for cast %llu mode %d",
				  lkb->lkb_id,
				  (unsigned long long)cb->seq,
				  cb->mode,
				  (unsigned long long)lkb->lkb_last_cast.seq,
				  lkb->lkb_last_cast.mode);
			rv = 0;
			goto out;
		}
	}
int dlm_dequeue_lkb_callback(struct dlm_lkb *lkb, struct dlm_callback **cb)
{
	/* oldest undelivered cb is callbacks first entry */
	*cb = list_first_entry_or_null(&lkb->lkb_callbacks,
				       struct dlm_callback, list);
	if (!*cb)
		return DLM_DEQUEUE_CALLBACK_EMPTY;

	if (cb->flags & DLM_CB_CAST) {
		memcpy(&lkb->lkb_last_cast, cb, sizeof(struct dlm_callback));
		lkb->lkb_last_cast_time = ktime_get();
	}
	/* remove it from callbacks so shift others down */
	list_del(&(*cb)->list);
	if (list_empty(&lkb->lkb_callbacks))
		return DLM_DEQUEUE_CALLBACK_LAST;

	if (cb->flags & DLM_CB_BAST) {
		memcpy(&lkb->lkb_last_bast, cb, sizeof(struct dlm_callback));
		lkb->lkb_last_bast_time = ktime_get();
	}
	rv = 0;
 out:
	return rv;
	return DLM_DEQUEUE_CALLBACK_SUCCESS;
}

void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
		uint32_t sbflags)
{
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	uint64_t new_seq, prev_seq;
	int rv;

	spin_lock(&dlm_cb_seq_spin);
	new_seq = ++dlm_cb_seq;
	if (!dlm_cb_seq)
		new_seq = ++dlm_cb_seq;
	spin_unlock(&dlm_cb_seq_spin);

	if (lkb->lkb_flags & DLM_IFL_USER) {
		dlm_user_add_ast(lkb, flags, mode, status, sbflags, new_seq);
		dlm_user_add_ast(lkb, flags, mode, status, sbflags);
		return;
	}

	mutex_lock(&lkb->lkb_cb_mutex);
	prev_seq = lkb->lkb_callbacks[0].seq;

	rv = dlm_add_lkb_callback(lkb, flags, mode, status, sbflags, new_seq);
	if (rv < 0)
		goto out;

	if (!prev_seq) {
	spin_lock(&lkb->lkb_cb_lock);
	rv = dlm_enqueue_lkb_callback(lkb, flags, mode, status, sbflags);
	switch (rv) {
	case DLM_ENQUEUE_CALLBACK_NEED_SCHED:
		kref_get(&lkb->lkb_ref);

		mutex_lock(&ls->ls_cb_mutex);
		spin_lock(&ls->ls_cb_lock);
		if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
			list_add(&lkb->lkb_cb_list, &ls->ls_cb_delay);
		} else {
			queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
		}
		mutex_unlock(&ls->ls_cb_mutex);
		spin_unlock(&ls->ls_cb_lock);
		break;
	case DLM_ENQUEUE_CALLBACK_FAILURE:
		WARN_ON_ONCE(1);
		break;
	case DLM_ENQUEUE_CALLBACK_SUCCESS:
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
 out:
	mutex_unlock(&lkb->lkb_cb_mutex);
	spin_unlock(&lkb->lkb_cb_lock);
}

void dlm_callback_work(struct work_struct *work)
@@ -218,53 +177,46 @@ void dlm_callback_work(struct work_struct *work)
	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
	void (*castfn) (void *astparam);
	void (*bastfn) (void *astparam, int mode);
	struct dlm_callback callbacks[DLM_CALLBACKS_SIZE];
	int i, rv, resid;

	memset(&callbacks, 0, sizeof(callbacks));

	mutex_lock(&lkb->lkb_cb_mutex);
	if (!lkb->lkb_callbacks[0].seq) {
		/* no callback work exists, shouldn't happen */
		log_error(ls, "dlm_callback_work %x no work", lkb->lkb_id);
		dlm_print_lkb(lkb);
		dlm_dump_lkb_callbacks(lkb);
	}
	struct dlm_callback *cb;
	int rv;

	for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
		rv = dlm_rem_lkb_callback(ls, lkb, &callbacks[i], &resid);
		if (rv < 0)
			break;
	}
	spin_lock(&lkb->lkb_cb_lock);
	rv = dlm_dequeue_lkb_callback(lkb, &cb);
	spin_unlock(&lkb->lkb_cb_lock);

	if (resid) {
		/* cbs remain, loop should have removed all, shouldn't happen */
		log_error(ls, "dlm_callback_work %x resid %d", lkb->lkb_id,
			  resid);
		dlm_print_lkb(lkb);
		dlm_dump_lkb_callbacks(lkb);
	}
	mutex_unlock(&lkb->lkb_cb_mutex);
	if (WARN_ON_ONCE(rv == DLM_DEQUEUE_CALLBACK_EMPTY))
		goto out;

	for (;;) {
		castfn = lkb->lkb_astfn;
		bastfn = lkb->lkb_bastfn;

	for (i = 0; i < DLM_CALLBACKS_SIZE; i++) {
		if (!callbacks[i].seq)
			break;
		if (callbacks[i].flags & DLM_CB_SKIP) {
			continue;
		} else if (callbacks[i].flags & DLM_CB_BAST) {
			trace_dlm_bast(ls, lkb, callbacks[i].mode);
			bastfn(lkb->lkb_astparam, callbacks[i].mode);
		} else if (callbacks[i].flags & DLM_CB_CAST) {
			lkb->lkb_lksb->sb_status = callbacks[i].sb_status;
			lkb->lkb_lksb->sb_flags = callbacks[i].sb_flags;
		if (cb->flags & DLM_CB_BAST) {
			trace_dlm_bast(ls, lkb, cb->mode);
			lkb->lkb_last_bast_time = ktime_get();
			lkb->lkb_last_bast_mode = cb->mode;
			bastfn(lkb->lkb_astparam, cb->mode);
		} else if (cb->flags & DLM_CB_CAST) {
			lkb->lkb_lksb->sb_status = cb->sb_status;
			lkb->lkb_lksb->sb_flags = cb->sb_flags;
			trace_dlm_ast(ls, lkb);
			lkb->lkb_last_cast_time = ktime_get();
			castfn(lkb->lkb_astparam);
		}

		kref_put(&cb->ref, dlm_release_callback);

		spin_lock(&lkb->lkb_cb_lock);
		rv = dlm_dequeue_lkb_callback(lkb, &cb);
		if (rv == DLM_DEQUEUE_CALLBACK_EMPTY) {
			lkb->lkb_flags &= ~DLM_IFL_CB_PENDING;
			spin_unlock(&lkb->lkb_cb_lock);
			break;
		}
		spin_unlock(&lkb->lkb_cb_lock);
	}

out:
	/* undo kref_get from dlm_add_cb, may cause lkb to be freed */
	dlm_put_lkb(lkb);
}
@@ -289,9 +241,9 @@ void dlm_callback_stop(struct dlm_ls *ls)
void dlm_callback_suspend(struct dlm_ls *ls)
{
	if (ls->ls_callback_wq) {
		mutex_lock(&ls->ls_cb_mutex);
		spin_lock(&ls->ls_cb_lock);
		set_bit(LSFL_CB_DELAY, &ls->ls_flags);
		mutex_unlock(&ls->ls_cb_mutex);
		spin_unlock(&ls->ls_cb_lock);

		flush_workqueue(ls->ls_callback_wq);
	}
@@ -308,10 +260,8 @@ void dlm_callback_resume(struct dlm_ls *ls)
	if (!ls->ls_callback_wq)
		return;

	clear_bit(LSFL_CB_DELAY, &ls->ls_flags);

more:
	mutex_lock(&ls->ls_cb_mutex);
	spin_lock(&ls->ls_cb_lock);
	list_for_each_entry_safe(lkb, safe, &ls->ls_cb_delay, lkb_cb_list) {
		list_del_init(&lkb->lkb_cb_list);
		queue_work(ls->ls_callback_wq, &lkb->lkb_cb_work);
@@ -320,7 +270,9 @@ void dlm_callback_resume(struct dlm_ls *ls)
			break;
	}
	empty = list_empty(&ls->ls_cb_delay);
	mutex_unlock(&ls->ls_cb_mutex);
	if (empty)
		clear_bit(LSFL_CB_DELAY, &ls->ls_flags);
	spin_unlock(&ls->ls_cb_lock);

	sum += count;
	if (!empty) {
+13 −4
Original line number Diff line number Diff line
@@ -11,13 +11,22 @@
#ifndef __ASTD_DOT_H__
#define __ASTD_DOT_H__

int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
                         int status, uint32_t sbflags, uint64_t seq);
int dlm_rem_lkb_callback(struct dlm_ls *ls, struct dlm_lkb *lkb,
                         struct dlm_callback *cb, int *resid);
#define DLM_ENQUEUE_CALLBACK_NEED_SCHED	1
#define DLM_ENQUEUE_CALLBACK_SUCCESS	0
#define DLM_ENQUEUE_CALLBACK_FAILURE	-1
int dlm_enqueue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
			     int status, uint32_t sbflags);
#define DLM_DEQUEUE_CALLBACK_EMPTY	2
#define DLM_DEQUEUE_CALLBACK_LAST	1
#define DLM_DEQUEUE_CALLBACK_SUCCESS	0
int dlm_dequeue_lkb_callback(struct dlm_lkb *lkb, struct dlm_callback **cb);
void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
                uint32_t sbflags);
void dlm_callback_set_last_ptr(struct dlm_callback **from,
			       struct dlm_callback *to);

void dlm_release_callback(struct kref *ref);
void dlm_purge_lkb_callbacks(struct dlm_lkb *lkb);
void dlm_callback_work(struct work_struct *work);
int dlm_callback_start(struct dlm_ls *ls);
void dlm_callback_stop(struct dlm_ls *ls);
+2 −2
Original line number Diff line number Diff line
@@ -183,7 +183,7 @@ static int dlm_check_protocol_and_dlm_running(unsigned int x)
		return -EINVAL;
	}

	if (dlm_allow_conn)
	if (dlm_lowcomms_is_running())
		return -EBUSY;

	return 0;
@@ -194,7 +194,7 @@ static int dlm_check_zero_and_dlm_running(unsigned int x)
	if (!x)
		return -EINVAL;

	if (dlm_allow_conn)
	if (dlm_lowcomms_is_running())
		return -EBUSY;

	return 0;
+1 −1
Original line number Diff line number Diff line
@@ -246,7 +246,7 @@ static void print_format3_lock(struct seq_file *s, struct dlm_lkb *lkb,
		   lkb->lkb_status,
		   lkb->lkb_grmode,
		   lkb->lkb_rqmode,
		   lkb->lkb_last_bast.mode,
		   lkb->lkb_last_bast_mode,
		   rsb_lookup,
		   lkb->lkb_wait_type,
		   lkb->lkb_lvbseq,
+11 −14
Original line number Diff line number Diff line
@@ -211,6 +211,7 @@ struct dlm_args {
#endif
#define DLM_IFL_DEADLOCK_CANCEL	0x01000000
#define DLM_IFL_STUB_MS		0x02000000 /* magic number for m_flags */
#define DLM_IFL_CB_PENDING	0x04000000
/* least significant 2 bytes are message changed, they are full transmitted
 * but at receive side only the 2 bytes LSB will be set.
 *
@@ -222,18 +223,17 @@ struct dlm_args {
#define DLM_IFL_USER		0x00000001
#define DLM_IFL_ORPHAN		0x00000002

#define DLM_CALLBACKS_SIZE	6

#define DLM_CB_CAST		0x00000001
#define DLM_CB_BAST		0x00000002
#define DLM_CB_SKIP		0x00000004

struct dlm_callback {
	uint64_t		seq;
	uint32_t		flags;		/* DLM_CBF_ */
	int			sb_status;	/* copy to lksb status */
	uint8_t			sb_flags;	/* copy to lksb flags */
	int8_t			mode; /* rq mode of bast, gr mode of cast */

	struct list_head	list;
	struct kref		ref;
};

struct dlm_lkb {
@@ -268,12 +268,13 @@ struct dlm_lkb {
	unsigned long		lkb_timeout_cs;
#endif

	struct mutex		lkb_cb_mutex;
	spinlock_t		lkb_cb_lock;
	struct work_struct	lkb_cb_work;
	struct list_head	lkb_cb_list; /* for ls_cb_delay or proc->asts */
	struct dlm_callback	lkb_callbacks[DLM_CALLBACKS_SIZE];
	struct dlm_callback	lkb_last_cast;
	struct dlm_callback	lkb_last_bast;
	struct list_head	lkb_callbacks;
	struct dlm_callback	*lkb_last_cast;
	struct dlm_callback	*lkb_last_cb;
	int			lkb_last_bast_mode;
	ktime_t			lkb_last_cast_time;	/* for debugging */
	ktime_t			lkb_last_bast_time;	/* for debugging */

@@ -591,11 +592,7 @@ struct dlm_ls {
	int			ls_new_rsb_count;
	struct list_head	ls_new_rsb;	/* new rsb structs */

	spinlock_t		ls_remove_spin;
	wait_queue_head_t	ls_remove_wait;
	char			ls_remove_name[DLM_RESNAME_MAXLEN+1];
	char			*ls_remove_names[DLM_REMOVE_NAMES_MAX];
	int			ls_remove_len;
	int			ls_remove_lens[DLM_REMOVE_NAMES_MAX];

	struct list_head	ls_nodes;	/* current nodes in ls */
@@ -631,7 +628,7 @@ struct dlm_ls {

	/* recovery related */

	struct mutex		ls_cb_mutex;
	spinlock_t		ls_cb_lock;
	struct list_head	ls_cb_delay; /* save for queue_work later */
	struct timer_list	ls_timer;
	struct task_struct	*ls_recoverd_task;
@@ -670,7 +667,7 @@ struct dlm_ls {
	void			*ls_ops_arg;

	int			ls_namelen;
	char			ls_name[1];
	char			ls_name[DLM_LOCKSPACE_LEN + 1];
};

/*
Loading