Commit f2fe4410 authored by Jens Axboe, committed by Yu Kuai

block: add blk_time_get_ns() and blk_time_get() helpers

mainline inclusion
from mainline-v6.9-rc1
commit 08420cf70cfb32eed2a0abfeb5c54c5651bd0c99
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IAGRKP
CVE: NA

Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=08420cf70cfb32eed2a0abfeb5c54c5651bd0c99



--------------------------------

Convert any user of ktime_get_ns() to use blk_time_get_ns(), and
ktime_get() to blk_time_get(), so we have a unified API for querying the
current time in nanoseconds or as ktime.

No functional changes intended, this patch just wraps ktime_get_ns()
and ktime_get() with a block helper.
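
For context, the two helpers introduced by this patch live in block/blk.h. A minimal sketch of their assumed shape, consistent with the "no functional changes intended" note above (plain pass-throughs to the existing ktime APIs; the exact mainline definitions may differ):

static inline u64 blk_time_get_ns(void)
{
	/* assumed pass-through: current monotonic time in nanoseconds */
	return ktime_get_ns();
}

static inline ktime_t blk_time_get(void)
{
	/* assumed pass-through: current monotonic time as a ktime_t */
	return ktime_get();
}

Callers in the diff below simply substitute these wrappers for direct ktime_get_ns(), ktime_get(), and ktime_to_ns(ktime_get()) calls.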

Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Conflicts:
	block/bfq-cgroup.c
	block/bfq-iosched.c
	block/blk-cgroup.c
	block/blk-flush.c
	block/blk-iocost.c
	block/blk-iolatency.c
	block/blk-core.c
	block/blk-mq-debugfs.h
	block/blk-mq.c
	block/blk-throttle.c
	block/blk-wbt.c
	block/blk.h
	block/kyber-iosched.c

[Lots of conflicts in context, reimplement the patch for the current context]
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
parent 2bfb89ac
block/bfq-cgroup.c  +8 −7
@@ -22,6 +22,7 @@
#include <linux/sbitmap.h>
#include <linux/delay.h>

#include "blk.h"
#include "bfq-iosched.h"

#if defined(CONFIG_BFQ_GROUP_IOSCHED) &&  defined(CONFIG_DEBUG_BLK_CGROUP)
@@ -60,7 +61,7 @@ static void bfqg_stats_update_group_wait_time(struct bfqg_stats *stats)
	if (!bfqg_stats_waiting(stats))
		return;

-	now = ktime_get_ns();
+	now = blk_time_get_ns();
	if (now > stats->start_group_wait_time)
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
@@ -77,7 +78,7 @@ static void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg,
		return;
	if (bfqg == curr_bfqg)
		return;
-	stats->start_group_wait_time = ktime_get_ns();
+	stats->start_group_wait_time = blk_time_get_ns();
	bfqg_stats_mark_waiting(stats);
}

@@ -89,7 +90,7 @@ static void bfqg_stats_end_empty_time(struct bfqg_stats *stats)
	if (!bfqg_stats_empty(stats))
		return;

-	now = ktime_get_ns();
+	now = blk_time_get_ns();
	if (now > stats->start_empty_time)
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
@@ -116,7 +117,7 @@ void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg)
	if (bfqg_stats_empty(stats))
		return;

-	stats->start_empty_time = ktime_get_ns();
+	stats->start_empty_time = blk_time_get_ns();
	bfqg_stats_mark_empty(stats);
}

@@ -125,7 +126,7 @@ void bfqg_stats_update_idle_time(struct bfq_group *bfqg)
	struct bfqg_stats *stats = &bfqg->stats;

	if (bfqg_stats_idling(stats)) {
-		u64 now = ktime_get_ns();
+		u64 now = blk_time_get_ns();

		if (now > stats->start_idle_time)
			blkg_stat_add(&stats->idle_time,
@@ -138,7 +139,7 @@ void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg)
{
	struct bfqg_stats *stats = &bfqg->stats;

-	stats->start_idle_time = ktime_get_ns();
+	stats->start_idle_time = blk_time_get_ns();
	bfqg_stats_mark_idling(stats);
}

@@ -175,7 +176,7 @@ void bfqg_stats_update_completion(struct bfq_group *bfqg, u64 start_time_ns,
				  u64 io_start_time_ns, unsigned int op)
{
	struct bfqg_stats *stats = &bfqg->stats;
-	u64 now = ktime_get_ns();
+	u64 now = blk_time_get_ns();

	if (now > io_start_time_ns)
		blkg_rwstat_add(&stats->service_time, op,
block/bfq-iosched.c  +12 −11
@@ -844,7 +844,7 @@ static struct request *bfq_check_fifo(struct bfq_queue *bfqq,

	rq = rq_entry_fifo(bfqq->fifo.next);

-	if (rq == last || ktime_get_ns() < rq->fifo_time)
+	if (rq == last || blk_time_get_ns() < rq->fifo_time)
		return NULL;

	bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
@@ -1566,7 +1566,7 @@ static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd,
		 * bfq_bfqq_update_budg_for_activation for
		 * details on the usage of the next variable.
		 */
-		arrived_in_time =  ktime_get_ns() <=
+		arrived_in_time =  blk_time_get_ns() <=
			bfqq->ttime.last_end_request +
			bfqd->bfq_slice_idle * 3;

@@ -2468,7 +2468,7 @@ static void bfq_set_budget_timeout(struct bfq_data *bfqd,
	else
		timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;

-	bfqd->last_budget_start = ktime_get();
+	bfqd->last_budget_start = blk_time_get();

	bfqq->budget_timeout = jiffies +
		bfqd->bfq_timeout * timeout_coeff;
@@ -2568,7 +2568,7 @@ static void bfq_arm_slice_timer(struct bfq_data *bfqd)
	else if (bfqq->wr_coeff > 1)
		sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC);

-	bfqd->last_idling_start = ktime_get();
+	bfqd->last_idling_start = blk_time_get();
	hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
		      HRTIMER_MODE_REL);
	bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
@@ -2605,7 +2605,7 @@ static void bfq_reset_rate_computation(struct bfq_data *bfqd,
				       struct request *rq)
{
	if (rq != NULL) { /* new rq dispatch now, reset accordingly */
-		bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
+		bfqd->last_dispatch = bfqd->first_dispatch = blk_time_get_ns();
		bfqd->peak_rate_samples = 1;
		bfqd->sequential_samples = 0;
		bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
@@ -2762,7 +2762,7 @@ static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq)
 */
static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
{
-	u64 now_ns = ktime_get_ns();
+	u64 now_ns = blk_time_get_ns();

	if (bfqd->peak_rate_samples == 0) { /* first dispatch */
		bfq_log(bfqd, "update_peak_rate: goto reset, samples %d",
@@ -3099,7 +3099,7 @@ static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq,
	if (compensate)
		delta_ktime = bfqd->last_idling_start;
	else
-		delta_ktime = ktime_get();
+		delta_ktime = blk_time_get();
	delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start);
	delta_usecs = ktime_to_us(delta_ktime);

@@ -4410,7 +4410,7 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
		bfq_clear_bfqq_sync(bfqq);

	/* set end request to minus infinity from now */
-	bfqq->ttime.last_end_request = ktime_get_ns() + 1;
+	bfqq->ttime.last_end_request = blk_time_get_ns() + 1;

	bfq_mark_bfqq_IO_bound(bfqq);

@@ -4528,7 +4528,7 @@ static void bfq_update_io_thinktime(struct bfq_data *bfqd,
				    struct bfq_queue *bfqq)
{
	struct bfq_ttime *ttime = &bfqq->ttime;
-	u64 elapsed = ktime_get_ns() - bfqq->ttime.last_end_request;
+	u64 elapsed = blk_time_get_ns() - bfqq->ttime.last_end_request;

	elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle);

@@ -4697,7 +4697,8 @@ static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq)
	bfq_add_request(rq);
	idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq);

-	rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
+	rq->fifo_time = blk_time_get_ns() +
+			bfqd->bfq_fifo_expire[rq_is_sync(rq)];
	list_add_tail(&rq->queuelist, &bfqq->fifo);

	bfq_rq_enqueued(bfqd, bfqq, rq);
@@ -4853,7 +4854,7 @@ static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd)
		bfq_weights_tree_remove(bfqd, bfqq);
	}

-	now_ns = ktime_get_ns();
+	now_ns = blk_time_get_ns();

	bfqq->ttime.last_end_request = now_ns;

block/blk-cgroup.c  +1 −1
@@ -1729,7 +1729,7 @@ static void blkcg_scale_delay(struct blkcg_gq *blkg, u64 now)
 */
static void blkcg_maybe_throttle_blkg(struct blkcg_gq *blkg, bool use_memdelay)
{
-	u64 now = ktime_to_ns(ktime_get());
+	u64 now = blk_time_get_ns();
	u64 exp;
	u64 delay_nsec = 0;
	int tok;
block/blk-core.c  +4 −4
@@ -454,7 +454,7 @@ void __blk_rq_init(struct request_queue *q, struct request *rq)
	RB_CLEAR_NODE(&rq->rb_node);
	rq->tag = -1;
	rq->internal_tag = -1;
-	rq->start_time_ns = ktime_get_ns();
+	rq->start_time_ns = blk_time_get_ns();
	rq->part = NULL;
}

@@ -2952,7 +2952,7 @@ blk_status_t __blk_insert_cloned_request(struct request_queue *q,
			u64 now = 0;

			if (blk_mq_need_time_stamp(rq))
-				now = ktime_get_ns();
+				now = blk_time_get_ns();

			blk_account_io_done(rq, now);
		}
@@ -3304,7 +3304,7 @@ void blk_start_request(struct request *req)
	blk_dequeue_request(req);

	if (test_bit(QUEUE_FLAG_STATS, &req->q->queue_flags)) {
-		req->io_start_time_ns = ktime_get_ns();
+		req->io_start_time_ns = blk_time_get_ns();
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
		req->throtl_size = blk_rq_sectors(req);
#endif
@@ -3509,7 +3509,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request);
void blk_finish_request(struct request *req, blk_status_t error)
{
	struct request_queue *q = req->q;
-	u64 now = ktime_get_ns();
+	u64 now = blk_time_get_ns();

	lockdep_assert_held(req->q->queue_lock);
	WARN_ON_ONCE(q->mq_ops);
block/blk-iolatency.c  +3 −3
@@ -557,7 +557,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
	struct rq_wait *rqw;
	struct iolatency_grp *iolat;
	u64 window_start;
-	u64 now = ktime_to_ns(ktime_get());
+	u64 now = blk_time_get_ns();
	bool issue_as_root = bio_issue_as_root_blkg(bio);
	bool enabled = false;
	int inflight = 0;
@@ -624,7 +624,7 @@ static void blkiolatency_timer_fn(struct timer_list *t)
	struct blk_iolatency *blkiolat = from_timer(blkiolat, t, timer);
	struct blkcg_gq *blkg;
	struct cgroup_subsys_state *pos_css;
-	u64 now = ktime_to_ns(ktime_get());
+	u64 now = blk_time_get_ns();

	rcu_read_lock();
	blkg_for_each_descendant_pre(blkg, pos_css,
@@ -895,7 +895,7 @@ static void iolatency_pd_init(struct blkg_policy_data *pd)
	struct blkcg_gq *blkg = lat_to_blkg(iolat);
	struct rq_qos *rqos = blkcg_rq_qos(blkg->q);
	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
-	u64 now = ktime_to_ns(ktime_get());
+	u64 now = blk_time_get_ns();
	int cpu;

	for_each_possible_cpu(cpu) {