Commit b30ab791 authored by Andreas Gruenbacher, committed by Philipp Reisner
Browse files

drbd: Rename "mdev" to "device"



sed -i -e 's:mdev:device:g'

Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
parent 54761697
Loading
Loading
Loading
Loading
+272 −272

File changed.

Preview size limit exceeded, changes collapsed.

+147 −147
Original line number Diff line number Diff line
@@ -113,20 +113,20 @@ struct drbd_bitmap {
};

#define bm_print_lock_info(m) __bm_print_lock_info(m, __func__)
static void __bm_print_lock_info(struct drbd_device *mdev, const char *func)
static void __bm_print_lock_info(struct drbd_device *device, const char *func)
{
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	if (!__ratelimit(&drbd_ratelimit_state))
		return;
	dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
		drbd_task_to_thread_name(mdev->tconn, current),
		drbd_task_to_thread_name(device->tconn, current),
		func, b->bm_why ?: "?",
		drbd_task_to_thread_name(mdev->tconn, b->bm_task));
		drbd_task_to_thread_name(device->tconn, b->bm_task));
}

void drbd_bm_lock(struct drbd_device *mdev, char *why, enum bm_flag flags)
void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags)
{
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	int trylock_failed;

	if (!b) {
@@ -138,9 +138,9 @@ void drbd_bm_lock(struct drbd_device *mdev, char *why, enum bm_flag flags)

	if (trylock_failed) {
		dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
			 drbd_task_to_thread_name(mdev->tconn, current),
			 drbd_task_to_thread_name(device->tconn, current),
			 why, b->bm_why ?: "?",
			 drbd_task_to_thread_name(mdev->tconn, b->bm_task));
			 drbd_task_to_thread_name(device->tconn, b->bm_task));
		mutex_lock(&b->bm_change);
	}
	if (BM_LOCKED_MASK & b->bm_flags)
@@ -151,15 +151,15 @@ void drbd_bm_lock(struct drbd_device *mdev, char *why, enum bm_flag flags)
	b->bm_task = current;
}

void drbd_bm_unlock(struct drbd_device *mdev)
void drbd_bm_unlock(struct drbd_device *device)
{
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	if (!b) {
		dev_err(DEV, "FIXME no bitmap in drbd_bm_unlock!?\n");
		return;
	}

	if (!(BM_LOCKED_MASK & mdev->bitmap->bm_flags))
	if (!(BM_LOCKED_MASK & device->bitmap->bm_flags))
		dev_err(DEV, "FIXME bitmap not locked in bm_unlock\n");

	b->bm_flags &= ~BM_LOCKED_MASK;
@@ -211,19 +211,19 @@ static unsigned long bm_page_to_idx(struct page *page)
/* As is very unlikely that the same page is under IO from more than one
 * context, we can get away with a bit per page and one wait queue per bitmap.
 */
static void bm_page_lock_io(struct drbd_device *mdev, int page_nr)
static void bm_page_lock_io(struct drbd_device *device, int page_nr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	wait_event(b->bm_io_wait, !test_and_set_bit(BM_PAGE_IO_LOCK, addr));
}

static void bm_page_unlock_io(struct drbd_device *mdev, int page_nr)
static void bm_page_unlock_io(struct drbd_device *device, int page_nr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	void *addr = &page_private(b->bm_pages[page_nr]);
	clear_bit_unlock(BM_PAGE_IO_LOCK, addr);
	wake_up(&mdev->bitmap->bm_io_wait);
	wake_up(&device->bitmap->bm_io_wait);
}

/* set _before_ submit_io, so it may be reset due to being changed
@@ -242,22 +242,22 @@ static void bm_set_page_need_writeout(struct page *page)

/**
 * drbd_bm_mark_for_writeout() - mark a page with a "hint" to be considered for writeout
 * @mdev:	DRBD device.
 * @device:	DRBD device.
 * @page_nr:	the bitmap page to mark with the "hint" flag
 *
 * From within an activity log transaction, we mark a few pages with these
 * hints, then call drbd_bm_write_hinted(), which will only write out changed
 * pages which are flagged with this mark.
 */
void drbd_bm_mark_for_writeout(struct drbd_device *mdev, int page_nr)
void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr)
{
	struct page *page;
	if (page_nr >= mdev->bitmap->bm_number_of_pages) {
	if (page_nr >= device->bitmap->bm_number_of_pages) {
		dev_warn(DEV, "BAD: page_nr: %u, number_of_pages: %u\n",
			 page_nr, (int)mdev->bitmap->bm_number_of_pages);
			 page_nr, (int)device->bitmap->bm_number_of_pages);
		return;
	}
	page = mdev->bitmap->bm_pages[page_nr];
	page = device->bitmap->bm_pages[page_nr];
	set_bit(BM_PAGE_HINT_WRITEOUT, &page_private(page));
}

@@ -340,7 +340,7 @@ static void bm_unmap(unsigned long *p_addr)

/*
 * actually most functions herein should take a struct drbd_bitmap*, not a
 * struct drbd_device*, but for the debug macros I like to have the mdev around
 * struct drbd_device*, but for the debug macros I like to have the device around
 * to be able to report device specific.
 */

@@ -436,11 +436,11 @@ static struct page **bm_realloc_pages(struct drbd_bitmap *b, unsigned long want)

/*
 * called on driver init only. TODO call when a device is created.
 * allocates the drbd_bitmap, and stores it in mdev->bitmap.
 * allocates the drbd_bitmap, and stores it in device->bitmap.
 */
int drbd_bm_init(struct drbd_device *mdev)
int drbd_bm_init(struct drbd_device *device)
{
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	WARN_ON(b != NULL);
	b = kzalloc(sizeof(struct drbd_bitmap), GFP_KERNEL);
	if (!b)
@@ -449,28 +449,28 @@ int drbd_bm_init(struct drbd_device *mdev)
	mutex_init(&b->bm_change);
	init_waitqueue_head(&b->bm_io_wait);

	mdev->bitmap = b;
	device->bitmap = b;

	return 0;
}

sector_t drbd_bm_capacity(struct drbd_device *mdev)
sector_t drbd_bm_capacity(struct drbd_device *device)
{
	if (!expect(mdev->bitmap))
	if (!expect(device->bitmap))
		return 0;
	return mdev->bitmap->bm_dev_capacity;
	return device->bitmap->bm_dev_capacity;
}

/* called on driver unload. TODO: call when a device is destroyed.
 */
void drbd_bm_cleanup(struct drbd_device *mdev)
void drbd_bm_cleanup(struct drbd_device *device)
{
	if (!expect(mdev->bitmap))
	if (!expect(device->bitmap))
		return;
	bm_free_pages(mdev->bitmap->bm_pages, mdev->bitmap->bm_number_of_pages);
	bm_vk_free(mdev->bitmap->bm_pages, (BM_P_VMALLOCED & mdev->bitmap->bm_flags));
	kfree(mdev->bitmap);
	mdev->bitmap = NULL;
	bm_free_pages(device->bitmap->bm_pages, device->bitmap->bm_number_of_pages);
	bm_vk_free(device->bitmap->bm_pages, (BM_P_VMALLOCED & device->bitmap->bm_flags));
	kfree(device->bitmap);
	device->bitmap = NULL;
}

/*
@@ -631,9 +631,9 @@ static u64 drbd_md_on_disk_bits(struct drbd_backing_dev *ldev)
 * In case this is actually a resize, we copy the old bitmap into the new one.
 * Otherwise, the bitmap is initialized to all bits set.
 */
int drbd_bm_resize(struct drbd_device *mdev, sector_t capacity, int set_new_bits)
int drbd_bm_resize(struct drbd_device *device, sector_t capacity, int set_new_bits)
{
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long bits, words, owords, obits;
	unsigned long want, have, onpages; /* number of pages */
	struct page **npages, **opages = NULL;
@@ -643,7 +643,7 @@ int drbd_bm_resize(struct drbd_device *mdev, sector_t capacity, int set_new_bits
	if (!expect(b))
		return -ENOMEM;

	drbd_bm_lock(mdev, "resize", BM_LOCKED_MASK);
	drbd_bm_lock(device, "resize", BM_LOCKED_MASK);

	dev_info(DEV, "drbd_bm_resize called with capacity == %llu\n",
			(unsigned long long)capacity);
@@ -678,9 +678,9 @@ int drbd_bm_resize(struct drbd_device *mdev, sector_t capacity, int set_new_bits
	*/
	words = ALIGN(bits, 64) >> LN2_BPL;

	if (get_ldev(mdev)) {
		u64 bits_on_disk = drbd_md_on_disk_bits(mdev->ldev);
		put_ldev(mdev);
	if (get_ldev(device)) {
		u64 bits_on_disk = drbd_md_on_disk_bits(device->ldev);
		put_ldev(device);
		if (bits > bits_on_disk) {
			dev_info(DEV, "bits = %lu\n", bits);
			dev_info(DEV, "bits_on_disk = %llu\n", bits_on_disk);
@@ -695,7 +695,7 @@ int drbd_bm_resize(struct drbd_device *mdev, sector_t capacity, int set_new_bits
		D_ASSERT(b->bm_pages != NULL);
		npages = b->bm_pages;
	} else {
		if (drbd_insert_fault(mdev, DRBD_FAULT_BM_ALLOC))
		if (drbd_insert_fault(device, DRBD_FAULT_BM_ALLOC))
			npages = NULL;
		else
			npages = bm_realloc_pages(b, want);
@@ -745,7 +745,7 @@ int drbd_bm_resize(struct drbd_device *mdev, sector_t capacity, int set_new_bits
	dev_info(DEV, "resync bitmap: bits=%lu words=%lu pages=%lu\n", bits, words, want);

 out:
	drbd_bm_unlock(mdev);
	drbd_bm_unlock(device);
	return err;
}

@@ -757,9 +757,9 @@ int drbd_bm_resize(struct drbd_device *mdev, sector_t capacity, int set_new_bits
 *
 * maybe bm_set should be atomic_t ?
 */
unsigned long _drbd_bm_total_weight(struct drbd_device *mdev)
unsigned long _drbd_bm_total_weight(struct drbd_device *device)
{
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long s;
	unsigned long flags;

@@ -775,20 +775,20 @@ unsigned long _drbd_bm_total_weight(struct drbd_device *mdev)
	return s;
}

unsigned long drbd_bm_total_weight(struct drbd_device *mdev)
unsigned long drbd_bm_total_weight(struct drbd_device *device)
{
	unsigned long s;
	/* if I don't have a disk, I don't know about out-of-sync status */
	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
	if (!get_ldev_if_state(device, D_NEGOTIATING))
		return 0;
	s = _drbd_bm_total_weight(mdev);
	put_ldev(mdev);
	s = _drbd_bm_total_weight(device);
	put_ldev(device);
	return s;
}

size_t drbd_bm_words(struct drbd_device *mdev)
size_t drbd_bm_words(struct drbd_device *device)
{
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return 0;
	if (!expect(b->bm_pages))
@@ -797,9 +797,9 @@ size_t drbd_bm_words(struct drbd_device *mdev)
	return b->bm_words;
}

unsigned long drbd_bm_bits(struct drbd_device *mdev)
unsigned long drbd_bm_bits(struct drbd_device *device)
{
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return 0;

@@ -811,10 +811,10 @@ unsigned long drbd_bm_bits(struct drbd_device *mdev)
 * bitmap must be locked by drbd_bm_lock.
 * currently only used from receive_bitmap.
 */
void drbd_bm_merge_lel(struct drbd_device *mdev, size_t offset, size_t number,
void drbd_bm_merge_lel(struct drbd_device *device, size_t offset, size_t number,
			unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr, *bm;
	unsigned long word, bits;
	unsigned int idx;
@@ -860,10 +860,10 @@ void drbd_bm_merge_lel(struct drbd_device *mdev, size_t offset, size_t number,
/* copy number words from the bitmap starting at offset into the buffer.
 * buffer[i] will be little endian unsigned long.
 */
void drbd_bm_get_lel(struct drbd_device *mdev, size_t offset, size_t number,
void drbd_bm_get_lel(struct drbd_device *device, size_t offset, size_t number,
		     unsigned long *buffer)
{
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr, *bm;
	size_t end, do_now;

@@ -897,9 +897,9 @@ void drbd_bm_get_lel(struct drbd_device *mdev, size_t offset, size_t number,
}

/* set all bits in the bitmap */
void drbd_bm_set_all(struct drbd_device *mdev)
void drbd_bm_set_all(struct drbd_device *device)
{
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
@@ -913,9 +913,9 @@ void drbd_bm_set_all(struct drbd_device *mdev)
}

/* clear all bits in the bitmap */
void drbd_bm_clear_all(struct drbd_device *mdev)
void drbd_bm_clear_all(struct drbd_device *device)
{
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	if (!expect(b))
		return;
	if (!expect(b->bm_pages))
@@ -928,7 +928,7 @@ void drbd_bm_clear_all(struct drbd_device *mdev)
}

struct bm_aio_ctx {
	struct drbd_device *mdev;
	struct drbd_device *device;
	atomic_t in_flight;
	unsigned int done;
	unsigned flags;
@@ -943,7 +943,7 @@ static void bm_aio_ctx_destroy(struct kref *kref)
{
	struct bm_aio_ctx *ctx = container_of(kref, struct bm_aio_ctx, kref);

	put_ldev(ctx->mdev);
	put_ldev(ctx->device);
	kfree(ctx);
}

@@ -951,8 +951,8 @@ static void bm_aio_ctx_destroy(struct kref *kref)
static void bm_async_io_complete(struct bio *bio, int error)
{
	struct bm_aio_ctx *ctx = bio->bi_private;
	struct drbd_device *mdev = ctx->mdev;
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_device *device = ctx->device;
	struct drbd_bitmap *b = device->bitmap;
	unsigned int idx = bm_page_to_idx(bio->bi_io_vec[0].bv_page);
	int uptodate = bio_flagged(bio, BIO_UPTODATE);

@@ -983,7 +983,7 @@ static void bm_async_io_complete(struct bio *bio, int error)
		dynamic_dev_dbg(DEV, "bitmap page idx %u completed\n", idx);
	}

	bm_page_unlock_io(mdev, idx);
	bm_page_unlock_io(device, idx);

	if (ctx->flags & BM_AIO_COPY_PAGES)
		mempool_free(bio->bi_io_vec[0].bv_page, drbd_md_io_page_pool);
@@ -992,7 +992,7 @@ static void bm_async_io_complete(struct bio *bio, int error)

	if (atomic_dec_and_test(&ctx->in_flight)) {
		ctx->done = 1;
		wake_up(&mdev->misc_wait);
		wake_up(&device->misc_wait);
		kref_put(&ctx->kref, &bm_aio_ctx_destroy);
	}
}
@@ -1000,23 +1000,23 @@ static void bm_async_io_complete(struct bio *bio, int error)
static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{
	struct bio *bio = bio_alloc_drbd(GFP_NOIO);
	struct drbd_device *mdev = ctx->mdev;
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_device *device = ctx->device;
	struct drbd_bitmap *b = device->bitmap;
	struct page *page;
	unsigned int len;

	sector_t on_disk_sector =
		mdev->ldev->md.md_offset + mdev->ldev->md.bm_offset;
		device->ldev->md.md_offset + device->ldev->md.bm_offset;
	on_disk_sector += ((sector_t)page_nr) << (PAGE_SHIFT-9);

	/* this might happen with very small
	 * flexible external meta data device,
	 * or with PAGE_SIZE > 4k */
	len = min_t(unsigned int, PAGE_SIZE,
		(drbd_md_last_sector(mdev->ldev) - on_disk_sector + 1)<<9);
		(drbd_md_last_sector(device->ldev) - on_disk_sector + 1)<<9);

	/* serialize IO on this page */
	bm_page_lock_io(mdev, page_nr);
	bm_page_lock_io(device, page_nr);
	/* before memcpy and submit,
	 * so it can be redirtied any time */
	bm_set_page_unchanged(b->bm_pages[page_nr]);
@@ -1027,7 +1027,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
		bm_store_page_idx(page, page_nr);
	} else
		page = b->bm_pages[page_nr];
	bio->bi_bdev = mdev->ldev->md_bdev;
	bio->bi_bdev = device->ldev->md_bdev;
	bio->bi_iter.bi_sector = on_disk_sector;
	/* bio_add_page of a single page to an empty bio will always succeed,
	 * according to api.  Do we want to assert that? */
@@ -1035,24 +1035,24 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
	bio->bi_private = ctx;
	bio->bi_end_io = bm_async_io_complete;

	if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
	if (drbd_insert_fault(device, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
		bio->bi_rw |= rw;
		bio_endio(bio, -EIO);
	} else {
		submit_bio(rw, bio);
		/* this should not count as user activity and cause the
		 * resync to throttle -- see drbd_rs_should_slow_down(). */
		atomic_add(len >> 9, &mdev->rs_sect_ev);
		atomic_add(len >> 9, &device->rs_sect_ev);
	}
}

/*
 * bm_rw: read/write the whole bitmap from/to its on disk location.
 */
static int bm_rw(struct drbd_device *mdev, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
static int bm_rw(struct drbd_device *device, int rw, unsigned flags, unsigned lazy_writeout_upper_idx) __must_hold(local)
{
	struct bm_aio_ctx *ctx;
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	int num_pages, i, count = 0;
	unsigned long now;
	char ppb[10];
@@ -1072,7 +1072,7 @@ static int bm_rw(struct drbd_device *mdev, int rw, unsigned flags, unsigned lazy
		return -ENOMEM;

	*ctx = (struct bm_aio_ctx) {
		.mdev = mdev,
		.device = device,
		.in_flight = ATOMIC_INIT(1),
		.done = 0,
		.flags = flags,
@@ -1080,7 +1080,7 @@ static int bm_rw(struct drbd_device *mdev, int rw, unsigned flags, unsigned lazy
		.kref = { ATOMIC_INIT(2) },
	};

	if (!get_ldev_if_state(mdev, D_ATTACHING)) {  /* put is in bm_aio_ctx_destroy() */
	if (!get_ldev_if_state(device, D_ATTACHING)) {  /* put is in bm_aio_ctx_destroy() */
		dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in bm_rw()\n");
		kfree(ctx);
		return -ENODEV;
@@ -1132,7 +1132,7 @@ static int bm_rw(struct drbd_device *mdev, int rw, unsigned flags, unsigned lazy
	 * "in_flight reached zero, all done" event.
	 */
	if (!atomic_dec_and_test(&ctx->in_flight))
		wait_until_done_or_force_detached(mdev, mdev->ldev, &ctx->done);
		wait_until_done_or_force_detached(device, device->ldev, &ctx->done);
	else
		kref_put(&ctx->kref, &bm_aio_ctx_destroy);

@@ -1144,7 +1144,7 @@ static int bm_rw(struct drbd_device *mdev, int rw, unsigned flags, unsigned lazy

	if (ctx->error) {
		dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
		drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
		err = -EIO; /* ctx->error ? */
	}

@@ -1153,7 +1153,7 @@ static int bm_rw(struct drbd_device *mdev, int rw, unsigned flags, unsigned lazy

	now = jiffies;
	if (rw == WRITE) {
		drbd_md_flush(mdev);
		drbd_md_flush(device);
	} else /* rw == READ */ {
		b->bm_set = bm_count_bits(b);
		dev_info(DEV, "recounting of set bits took additional %lu jiffies\n",
@@ -1171,38 +1171,38 @@ static int bm_rw(struct drbd_device *mdev, int rw, unsigned flags, unsigned lazy

/**
 * drbd_bm_read() - Read the whole bitmap from its on disk location.
 * @mdev:	DRBD device.
 * @device:	DRBD device.
 */
int drbd_bm_read(struct drbd_device *mdev) __must_hold(local)
int drbd_bm_read(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(mdev, READ, 0, 0);
	return bm_rw(device, READ, 0, 0);
}

/**
 * drbd_bm_write() - Write the whole bitmap to its on disk location.
 * @mdev:	DRBD device.
 * @device:	DRBD device.
 *
 * Will only write pages that have changed since last IO.
 */
int drbd_bm_write(struct drbd_device *mdev) __must_hold(local)
int drbd_bm_write(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(mdev, WRITE, 0, 0);
	return bm_rw(device, WRITE, 0, 0);
}

/**
 * drbd_bm_write_all() - Write the whole bitmap to its on disk location.
 * @mdev:	DRBD device.
 * @device:	DRBD device.
 *
 * Will write all pages.
 */
int drbd_bm_write_all(struct drbd_device *mdev) __must_hold(local)
int drbd_bm_write_all(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(mdev, WRITE, BM_WRITE_ALL_PAGES, 0);
	return bm_rw(device, WRITE, BM_WRITE_ALL_PAGES, 0);
}

/**
 * drbd_bm_write_copy_pages() - Write the whole bitmap to its on disk location.
 * @mdev:	DRBD device.
 * @device:	DRBD device.
 *
 * Will only write pages that have changed since last IO.
 * In contrast to drbd_bm_write(), this will copy the bitmap pages
@@ -1211,23 +1211,23 @@ int drbd_bm_write_all(struct drbd_device *mdev) __must_hold(local)
 * verify is aborted due to a failed peer disk, while local IO continues, or
 * pending resync acks are still being processed.
 */
int drbd_bm_write_copy_pages(struct drbd_device *mdev) __must_hold(local)
int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(mdev, WRITE, BM_AIO_COPY_PAGES, 0);
	return bm_rw(device, WRITE, BM_AIO_COPY_PAGES, 0);
}

/**
 * drbd_bm_write_hinted() - Write bitmap pages with "hint" marks, if they have changed.
 * @mdev:	DRBD device.
 * @device:	DRBD device.
 */
int drbd_bm_write_hinted(struct drbd_device *mdev) __must_hold(local)
int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local)
{
	return bm_rw(mdev, WRITE, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
	return bm_rw(device, WRITE, BM_AIO_WRITE_HINTED | BM_AIO_COPY_PAGES, 0);
}

/**
 * drbd_bm_write_page() - Writes a PAGE_SIZE aligned piece of bitmap
 * @mdev:	DRBD device.
 * @device:	DRBD device.
 * @idx:	bitmap page index
 *
 * We don't want to special case on logical_block_size of the backend device,
@@ -1237,12 +1237,12 @@ int drbd_bm_write_hinted(struct drbd_device *mdev) __must_hold(local)
 * In case this becomes an issue on systems with larger PAGE_SIZE,
 * we may want to change this again to write 4k aligned 4k pieces.
 */
int drbd_bm_write_page(struct drbd_device *mdev, unsigned int idx) __must_hold(local)
int drbd_bm_write_page(struct drbd_device *device, unsigned int idx) __must_hold(local)
{
	struct bm_aio_ctx *ctx;
	int err;

	if (bm_test_page_unchanged(mdev->bitmap->bm_pages[idx])) {
	if (bm_test_page_unchanged(device->bitmap->bm_pages[idx])) {
		dynamic_dev_dbg(DEV, "skipped bm page write for idx %u\n", idx);
		return 0;
	}
@@ -1252,7 +1252,7 @@ int drbd_bm_write_page(struct drbd_device *mdev, unsigned int idx) __must_hold(l
		return -ENOMEM;

	*ctx = (struct bm_aio_ctx) {
		.mdev = mdev,
		.device = device,
		.in_flight = ATOMIC_INIT(1),
		.done = 0,
		.flags = BM_AIO_COPY_PAGES,
@@ -1260,21 +1260,21 @@ int drbd_bm_write_page(struct drbd_device *mdev, unsigned int idx) __must_hold(l
		.kref = { ATOMIC_INIT(2) },
	};

	if (!get_ldev_if_state(mdev, D_ATTACHING)) {  /* put is in bm_aio_ctx_destroy() */
	if (!get_ldev_if_state(device, D_ATTACHING)) {  /* put is in bm_aio_ctx_destroy() */
		dev_err(DEV, "ASSERT FAILED: get_ldev_if_state() == 1 in drbd_bm_write_page()\n");
		kfree(ctx);
		return -ENODEV;
	}

	bm_page_io_async(ctx, idx, WRITE_SYNC);
	wait_until_done_or_force_detached(mdev, mdev->ldev, &ctx->done);
	wait_until_done_or_force_detached(device, device->ldev, &ctx->done);

	if (ctx->error)
		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
		drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
		/* that causes us to detach, so the in memory bitmap will be
		 * gone in a moment as well. */

	mdev->bm_writ_cnt++;
	device->bm_writ_cnt++;
	err = atomic_read(&ctx->in_flight) ? -EIO : ctx->error;
	kref_put(&ctx->kref, &bm_aio_ctx_destroy);
	return err;
@@ -1288,10 +1288,10 @@ int drbd_bm_write_page(struct drbd_device *mdev, unsigned int idx) __must_hold(l
 *
 * this returns a bit number, NOT a sector!
 */
static unsigned long __bm_find_next(struct drbd_device *mdev, unsigned long bm_fo,
static unsigned long __bm_find_next(struct drbd_device *device, unsigned long bm_fo,
	const int find_zero_bit)
{
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr;
	unsigned long bit_offset;
	unsigned i;
@@ -1328,10 +1328,10 @@ static unsigned long __bm_find_next(struct drbd_device *mdev, unsigned long bm_f
	return bm_fo;
}

static unsigned long bm_find_next(struct drbd_device *mdev,
static unsigned long bm_find_next(struct drbd_device *device,
	unsigned long bm_fo, const int find_zero_bit)
{
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long i = DRBD_END_OF_BITMAP;

	if (!expect(b))
@@ -1341,39 +1341,39 @@ static unsigned long bm_find_next(struct drbd_device *mdev,

	spin_lock_irq(&b->bm_lock);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);
		bm_print_lock_info(device);

	i = __bm_find_next(mdev, bm_fo, find_zero_bit);
	i = __bm_find_next(device, bm_fo, find_zero_bit);

	spin_unlock_irq(&b->bm_lock);
	return i;
}

unsigned long drbd_bm_find_next(struct drbd_device *mdev, unsigned long bm_fo)
unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 0);
	return bm_find_next(device, bm_fo, 0);
}

#if 0
/* not yet needed for anything. */
unsigned long drbd_bm_find_next_zero(struct drbd_device *mdev, unsigned long bm_fo)
unsigned long drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
	return bm_find_next(mdev, bm_fo, 1);
	return bm_find_next(device, bm_fo, 1);
}
#endif

/* does not spin_lock_irqsave.
 * you must take drbd_bm_lock() first */
unsigned long _drbd_bm_find_next(struct drbd_device *mdev, unsigned long bm_fo)
unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
	return __bm_find_next(mdev, bm_fo, 0);
	/* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */
	return __bm_find_next(device, bm_fo, 0);
}

unsigned long _drbd_bm_find_next_zero(struct drbd_device *mdev, unsigned long bm_fo)
unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo)
{
	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
	return __bm_find_next(mdev, bm_fo, 1);
	/* WARN_ON(!(BM_DONT_SET & device->b->bm_flags)); */
	return __bm_find_next(device, bm_fo, 1);
}

/* returns number of bits actually changed.
@@ -1382,10 +1382,10 @@ unsigned long _drbd_bm_find_next_zero(struct drbd_device *mdev, unsigned long bm
 * wants bitnr, not sector.
 * expected to be called for only a few bits (e - s about BITS_PER_LONG).
 * Must hold bitmap lock already. */
static int __bm_change_bits_to(struct drbd_device *mdev, const unsigned long s,
static int __bm_change_bits_to(struct drbd_device *device, const unsigned long s,
	unsigned long e, int val)
{
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int last_page_nr = -1U;
@@ -1431,11 +1431,11 @@ static int __bm_change_bits_to(struct drbd_device *mdev, const unsigned long s,
 * for val != 0, we change 0 -> 1, return code positive
 * for val == 0, we change 1 -> 0, return code negative
 * wants bitnr, not sector */
static int bm_change_bits_to(struct drbd_device *mdev, const unsigned long s,
static int bm_change_bits_to(struct drbd_device *device, const unsigned long s,
	const unsigned long e, int val)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	int c = 0;

	if (!expect(b))
@@ -1445,24 +1445,24 @@ static int bm_change_bits_to(struct drbd_device *mdev, const unsigned long s,

	spin_lock_irqsave(&b->bm_lock, flags);
	if ((val ? BM_DONT_SET : BM_DONT_CLEAR) & b->bm_flags)
		bm_print_lock_info(mdev);
		bm_print_lock_info(device);

	c = __bm_change_bits_to(mdev, s, e, val);
	c = __bm_change_bits_to(device, s, e, val);

	spin_unlock_irqrestore(&b->bm_lock, flags);
	return c;
}

/* returns number of bits changed 0 -> 1 */
int drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const unsigned long e)
int drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	return bm_change_bits_to(mdev, s, e, 1);
	return bm_change_bits_to(device, s, e, 1);
}

/* returns number of bits changed 1 -> 0 */
int drbd_bm_clear_bits(struct drbd_device *mdev, const unsigned long s, const unsigned long e)
int drbd_bm_clear_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	return -bm_change_bits_to(mdev, s, e, 0);
	return -bm_change_bits_to(device, s, e, 0);
}

/* sets all bits in full words,
@@ -1494,7 +1494,7 @@ static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
 * You must first drbd_bm_lock().
 * Can be called to set the whole bitmap in one go.
 * Sets bits from s to e _inclusive_. */
void _drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const unsigned long e)
void _drbd_bm_set_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	/* First set_bit from the first bit (s)
	 * up to the next long boundary (sl),
@@ -1504,7 +1504,7 @@ void _drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const un
	 * Do not use memset, because we must account for changes,
	 * so we need to loop over the words with hweight() anyways.
	 */
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long sl = ALIGN(s,BITS_PER_LONG);
	unsigned long el = (e+1) & ~((unsigned long)BITS_PER_LONG-1);
	int first_page;
@@ -1516,7 +1516,7 @@ void _drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const un
	if (e - s <= 3*BITS_PER_LONG) {
		/* don't bother; el and sl may even be wrong. */
		spin_lock_irq(&b->bm_lock);
		__bm_change_bits_to(mdev, s, e, 1);
		__bm_change_bits_to(device, s, e, 1);
		spin_unlock_irq(&b->bm_lock);
		return;
	}
@@ -1527,7 +1527,7 @@ void _drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const un

	/* bits filling the current long */
	if (sl)
		__bm_change_bits_to(mdev, s, sl-1, 1);
		__bm_change_bits_to(device, s, sl-1, 1);

	first_page = sl >> (3 + PAGE_SHIFT);
	last_page = el >> (3 + PAGE_SHIFT);
@@ -1539,7 +1539,7 @@ void _drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const un

	/* first and full pages, unless first page == last page */
	for (page_nr = first_page; page_nr < last_page; page_nr++) {
		bm_set_full_words_within_one_page(mdev->bitmap, page_nr, first_word, last_word);
		bm_set_full_words_within_one_page(device->bitmap, page_nr, first_word, last_word);
		spin_unlock_irq(&b->bm_lock);
		cond_resched();
		first_word = 0;
@@ -1555,7 +1555,7 @@ void _drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const un
	 * as we did not allocate it, it is not present in bitmap->bm_pages.
	 */
	if (last_word)
		bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word);
		bm_set_full_words_within_one_page(device->bitmap, last_page, first_word, last_word);

	/* possibly trailing bits.
	 * example: (e & 63) == 63, el will be e+1.
@@ -1563,7 +1563,7 @@ void _drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const un
	 * it would trigger an assert in __bm_change_bits_to()
	 */
	if (el <= e)
		__bm_change_bits_to(mdev, el, e, 1);
		__bm_change_bits_to(device, el, e, 1);
	spin_unlock_irq(&b->bm_lock);
}

@@ -1574,10 +1574,10 @@ void _drbd_bm_set_bits(struct drbd_device *mdev, const unsigned long s, const un
 *  0 ... bit not set
 * -1 ... first out of bounds access, stop testing for bits!
 */
int drbd_bm_test_bit(struct drbd_device *mdev, const unsigned long bitnr)
int drbd_bm_test_bit(struct drbd_device *device, const unsigned long bitnr)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr;
	int i;

@@ -1588,7 +1588,7 @@ int drbd_bm_test_bit(struct drbd_device *mdev, const unsigned long bitnr)

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);
		bm_print_lock_info(device);
	if (bitnr < b->bm_bits) {
		p_addr = bm_map_pidx(b, bm_bit_to_page_idx(b, bitnr));
		i = test_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr) ? 1 : 0;
@@ -1605,10 +1605,10 @@ int drbd_bm_test_bit(struct drbd_device *mdev, const unsigned long bitnr)
}

/* returns number of bits set in the range [s, e] */
int drbd_bm_count_bits(struct drbd_device *mdev, const unsigned long s, const unsigned long e)
int drbd_bm_count_bits(struct drbd_device *device, const unsigned long s, const unsigned long e)
{
	unsigned long flags;
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	unsigned long *p_addr = NULL;
	unsigned long bitnr;
	unsigned int page_nr = -1U;
@@ -1625,7 +1625,7 @@ int drbd_bm_count_bits(struct drbd_device *mdev, const unsigned long s, const un

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);
		bm_print_lock_info(device);
	for (bitnr = s; bitnr <= e; bitnr++) {
		unsigned int idx = bm_bit_to_page_idx(b, bitnr);
		if (page_nr != idx) {
@@ -1660,9 +1660,9 @@ int drbd_bm_count_bits(struct drbd_device *mdev, const unsigned long s, const un
 * reference count of some bitmap extent element from some lru instead...
 *
 */
int drbd_bm_e_weight(struct drbd_device *mdev, unsigned long enr)
int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr)
{
	struct drbd_bitmap *b = mdev->bitmap;
	struct drbd_bitmap *b = device->bitmap;
	int count, s, e;
	unsigned long flags;
	unsigned long *p_addr, *bm;
@@ -1674,7 +1674,7 @@ int drbd_bm_e_weight(struct drbd_device *mdev, unsigned long enr)

	spin_lock_irqsave(&b->bm_lock, flags);
	if (BM_DONT_TEST & b->bm_flags)
		bm_print_lock_info(mdev);
		bm_print_lock_info(device);

	s = S2W(enr);
	e = min((size_t)S2W(enr+1), b->bm_words);
+243 −243

File changed.

Preview size limit exceeded, changes collapsed.

+593 −593

File changed.

Preview size limit exceeded, changes collapsed.

+469 −469

File changed.

Preview size limit exceeded, changes collapsed.

Loading