Commit c8864cb7 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'for-linus-20190202' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A few fixes that should go into this release. This contains:

   - MD pull request from Song, fixing a recovery OOM issue (Alexei)

   - Fix for a sync related stall (Jianchao)

   - Dummy callback for timeouts (Tetsuo)

   - IDE atapi sense ordering fix (me)"

* tag 'for-linus-20190202' of git://git.kernel.dk/linux-block:
  ide: ensure atapi sense request aren't preempted
  blk-mq: fix a hung issue when fsync
  block: pass no-op callback to INIT_WORK().
  md/raid5: fix 'out of memory' during raid cache recovery
parents 3cde55ee 9a6d5488
Loading
Loading
Loading
Loading
+5 −1
Original line number Diff line number Diff line
@@ -462,6 +462,10 @@ static void blk_rq_timed_out_timer(struct timer_list *t)
	kblockd_schedule_work(&q->timeout_work);
}

/*
 * Intentionally-empty callback for q->timeout_work.
 *
 * INIT_WORK() used to be passed NULL here (see the removed line in
 * blk_alloc_queue_node below); a work item needs a valid function
 * pointer, so this no-op stands in for "nothing to do".
 */
static void blk_timeout_work(struct work_struct *work)
{
}

/**
 * blk_alloc_queue_node - allocate a request queue
 * @gfp_mask: memory allocation flags
@@ -505,7 +509,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
	timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
		    laptop_mode_timer_fn, 0);
	timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
	INIT_WORK(&q->timeout_work, NULL);
	INIT_WORK(&q->timeout_work, blk_timeout_work);
	INIT_LIST_HEAD(&q->icq_list);
#ifdef CONFIG_BLK_CGROUP
	INIT_LIST_HEAD(&q->blkg_list);
+1 −1
Original line number Diff line number Diff line
@@ -335,7 +335,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);

	blk_mq_run_hw_queue(hctx, true);
	blk_mq_sched_restart(hctx);
}

/**
+8 −1
Original line number Diff line number Diff line
@@ -235,21 +235,28 @@ EXPORT_SYMBOL_GPL(ide_prep_sense);

int ide_queue_sense_rq(ide_drive_t *drive, void *special)
{
	struct request *sense_rq = drive->sense_rq;
	ide_hwif_t *hwif = drive->hwif;
	struct request *sense_rq;
	unsigned long flags;

	spin_lock_irqsave(&hwif->lock, flags);

	/* deferred failure from ide_prep_sense() */
	if (!drive->sense_rq_armed) {
		printk(KERN_WARNING PFX "%s: error queuing a sense request\n",
		       drive->name);
		spin_unlock_irqrestore(&hwif->lock, flags);
		return -ENOMEM;
	}

	sense_rq = drive->sense_rq;
	ide_req(sense_rq)->special = special;
	drive->sense_rq_armed = false;

	drive->hwif->rq = NULL;

	ide_insert_request_head(drive, sense_rq);
	spin_unlock_irqrestore(&hwif->lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
+31 −30
Original line number Diff line number Diff line
@@ -68,8 +68,10 @@ int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
	}

	if (!blk_update_request(rq, error, nr_bytes)) {
		if (rq == drive->sense_rq)
		if (rq == drive->sense_rq) {
			drive->sense_rq = NULL;
			drive->sense_rq_active = false;
		}

		__blk_mq_end_request(rq, error);
		return 0;
@@ -451,16 +453,11 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
		blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
}

/*
 * Issue a new request to a device.
 */
blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
			  bool local_requeue)
{
	ide_drive_t	*drive = hctx->queue->queuedata;
	ide_hwif_t *hwif = drive->hwif;
	struct ide_host *host = hwif->host;
	struct request	*rq = bd->rq;
	ide_startstop_t	startstop;

	if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
@@ -474,8 +471,6 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
	if (ide_lock_host(host, hwif))
		return BLK_STS_DEV_RESOURCE;

	blk_mq_start_request(rq);

	spin_lock_irq(&hwif->lock);

	if (!ide_lock_port(hwif)) {
@@ -510,18 +505,6 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
		hwif->cur_dev = drive;
		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);

		/*
		 * we know that the queue isn't empty, but this can happen
		 * if ->prep_rq() decides to kill a request
		 */
		if (!rq) {
			rq = bd->rq;
			if (!rq) {
				ide_unlock_port(hwif);
				goto out;
			}
		}

		/*
		 * Sanity: don't accept a request that isn't a PM request
		 * if we are currently power managed. This is very important as
@@ -560,8 +543,11 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
		}
	} else {
plug_device:
		if (local_requeue)
			list_add(&rq->queuelist, &drive->rq_list);
		spin_unlock_irq(&hwif->lock);
		ide_unlock_host(host);
		if (!local_requeue)
			ide_requeue_and_plug(drive, rq);
		return BLK_STS_OK;
	}
@@ -573,6 +559,26 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
	return BLK_STS_OK;
}

/*
 * ide_queue_rq - blk-mq ->queue_rq() entry point: issue a new request
 * to a device.
 *
 * Returns BLK_STS_DEV_RESOURCE to make blk-mq back off and retry later
 * while a sense request is pending, so the queued ATAPI sense request
 * is not preempted by new I/O (see commit subject above); otherwise
 * starts the request and hands it to ide_issue_rq().
 */
blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	ide_drive_t *drive = hctx->queue->queuedata;
	ide_hwif_t *hwif = drive->hwif;

	/*
	 * sense_rq_active is read under hwif->lock; it is set by
	 * ide_insert_request_head() and cleared once the sense request
	 * completes (both visible in the surrounding hunks).
	 */
	spin_lock_irq(&hwif->lock);
	if (drive->sense_rq_active) {
		spin_unlock_irq(&hwif->lock);
		return BLK_STS_DEV_RESOURCE;
	}
	spin_unlock_irq(&hwif->lock);

	/* Started here rather than in ide_issue_rq(), which local-requeue
	 * callers reach without a fresh blk-mq dispatch. */
	blk_mq_start_request(bd->rq);
	return ide_issue_rq(drive, bd->rq, false);
}

static int drive_is_ready(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
@@ -893,13 +899,8 @@ EXPORT_SYMBOL_GPL(ide_pad_transfer);

void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	unsigned long flags;

	spin_lock_irqsave(&hwif->lock, flags);
	drive->sense_rq_active = true;
	list_add_tail(&rq->queuelist, &drive->rq_list);
	spin_unlock_irqrestore(&hwif->lock, flags);

	kblockd_schedule_work(&drive->rq_work);
}
EXPORT_SYMBOL_GPL(ide_insert_request_head);
+2 −0
Original line number Diff line number Diff line
@@ -54,7 +54,9 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
	scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
	scsi_req(rq)->cmd_len = 1;
	ide_req(rq)->type = ATA_PRIV_MISC;
	spin_lock_irq(&hwif->lock);
	ide_insert_request_head(drive, rq);
	spin_unlock_irq(&hwif->lock);

out:
	return;
Loading