drivers/mmc/core/sdio_ops.c (+22 −17)

@@ -121,6 +121,7 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
         struct sg_table sgtable;
         unsigned int nents, left_size, i;
         unsigned int seg_size = card->host->max_seg_size;
+        int err;

         WARN_ON(blksz == 0);

@@ -170,28 +171,32 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,

         mmc_set_data_timeout(&data, card);

-        mmc_wait_for_req(card->host, &mrq);
+        mmc_pre_req(card->host, &mrq);

-        if (nents > 1)
-                sg_free_table(&sgtable);
+        mmc_wait_for_req(card->host, &mrq);

         if (cmd.error)
-                return cmd.error;
-        if (data.error)
-                return data.error;
-
-        if (mmc_host_is_spi(card->host)) {
+                err = cmd.error;
+        else if (data.error)
+                err = data.error;
+        else if (mmc_host_is_spi(card->host))
                 /* host driver already reported errors */
-        } else {
-                if (cmd.resp[0] & R5_ERROR)
-                        return -EIO;
-                if (cmd.resp[0] & R5_FUNCTION_NUMBER)
-                        return -EINVAL;
-                if (cmd.resp[0] & R5_OUT_OF_RANGE)
-                        return -ERANGE;
-        }
+                err = 0;
+        else if (cmd.resp[0] & R5_ERROR)
+                err = -EIO;
+        else if (cmd.resp[0] & R5_FUNCTION_NUMBER)
+                err = -EINVAL;
+        else if (cmd.resp[0] & R5_OUT_OF_RANGE)
+                err = -ERANGE;
+        else
+                err = 0;

-        return 0;
+        mmc_post_req(card->host, &mrq, err);
+
+        if (nents > 1)
+                sg_free_table(&sgtable);
+
+        return err;
 }

 int sdio_reset(struct mmc_host *host)
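Note on the sdio_ops.c change: mmc_pre_req() lets the host driver prepare the transfer (typically DMA-mapping the data) before the request is issued, and mmc_post_req() undoes that preparation afterwards. Because every mmc_pre_req() must be balanced by an mmc_post_req() even when the command or data phase fails, the early returns are folded into a single err value that is returned only after the post hook and the sg_table cleanup have run. A minimal sketch of that pairing, assuming the mmc_pre_req()/mmc_post_req() declarations from drivers/mmc/core/core.h (run_request() is a made-up name for illustration):

static int run_request(struct mmc_host *host, struct mmc_request *mrq)
{
        int err;

        mmc_pre_req(host, mrq);         /* host may DMA-map mrq->data here */

        mmc_wait_for_req(host, mrq);

        if (mrq->cmd->error)
                err = mrq->cmd->error;
        else if (mrq->data && mrq->data->error)
                err = mrq->data->error;
        else
                err = 0;

        /* always unprepare, passing the error so the host can clean up */
        mmc_post_req(host, mrq, err);

        return err;
}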
drivers/mmc/host/Kconfig (+1 −1)

@@ -614,7 +614,7 @@ config MMC_GOLDFISH

 config MMC_SPI
         tristate "MMC/SD/SDIO over SPI"
-        depends on SPI_MASTER && HAS_DMA
+        depends on SPI_MASTER
         select CRC7
         select CRC_ITU_T
         help
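Note on the Kconfig change: dropping the HAS_DMA dependency is only safe because the mmc_spi.c change below hides every DMA-mapping call behind CONFIG_HAS_DMA and provides no-op stubs otherwise, so the driver still builds (falling back to PIO) on configurations without the DMA mapping API.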
drivers/mmc/host/mmc_spi.c (+52 −34)

@@ -1278,6 +1278,52 @@ mmc_spi_detect_irq(int irq, void *mmc)
         return IRQ_HANDLED;
 }

+#ifdef CONFIG_HAS_DMA
+static int mmc_spi_dma_alloc(struct mmc_spi_host *host)
+{
+        struct spi_device *spi = host->spi;
+        struct device *dev;
+
+        if (!spi->master->dev.parent->dma_mask)
+                return 0;
+
+        dev = spi->master->dev.parent;
+
+        host->ones_dma = dma_map_single(dev, host->ones, MMC_SPI_BLOCKSIZE,
+                                        DMA_TO_DEVICE);
+        if (dma_mapping_error(dev, host->ones_dma))
+                return -ENOMEM;
+
+        host->data_dma = dma_map_single(dev, host->data, sizeof(*host->data),
+                                        DMA_BIDIRECTIONAL);
+        if (dma_mapping_error(dev, host->data_dma)) {
+                dma_unmap_single(dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
+                                 DMA_TO_DEVICE);
+                return -ENOMEM;
+        }
+
+        dma_sync_single_for_cpu(dev, host->data_dma, sizeof(*host->data),
+                                DMA_BIDIRECTIONAL);
+
+        host->dma_dev = dev;
+        return 0;
+}
+
+static void mmc_spi_dma_free(struct mmc_spi_host *host)
+{
+        if (!host->dma_dev)
+                return;
+
+        dma_unmap_single(host->dma_dev, host->ones_dma, MMC_SPI_BLOCKSIZE,
+                         DMA_TO_DEVICE);
+        dma_unmap_single(host->dma_dev, host->data_dma, sizeof(*host->data),
+                         DMA_BIDIRECTIONAL);
+}
+#else
+static inline int mmc_spi_dma_alloc(struct mmc_spi_host *host) { return 0; }
+static inline void mmc_spi_dma_free(struct mmc_spi_host *host) {}
+#endif
+
 static int mmc_spi_probe(struct spi_device *spi)
 {
         void *ones;

@@ -1374,23 +1420,9 @@ static int mmc_spi_probe(struct spi_device *spi)
         if (!host->data)
                 goto fail_nobuf1;

-        if (spi->master->dev.parent->dma_mask) {
-                struct device *dev = spi->master->dev.parent;
-
-                host->dma_dev = dev;
-                host->ones_dma = dma_map_single(dev, ones,
-                                MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
-                if (dma_mapping_error(dev, host->ones_dma))
-                        goto fail_ones_dma;
-                host->data_dma = dma_map_single(dev, host->data,
-                                sizeof(*host->data), DMA_BIDIRECTIONAL);
-                if (dma_mapping_error(dev, host->data_dma))
-                        goto fail_data_dma;
-                dma_sync_single_for_cpu(host->dma_dev, host->data_dma,
-                                sizeof(*host->data), DMA_BIDIRECTIONAL);
-        }
+        status = mmc_spi_dma_alloc(host);
+        if (status)
+                goto fail_dma;

         /* setup message for status/busy readback */
         spi_message_init(&host->readback);

@@ -1458,20 +1490,12 @@ static int mmc_spi_probe(struct spi_device *spi)
 fail_add_host:
         mmc_remove_host(mmc);
 fail_glue_init:
-        if (host->dma_dev)
-                dma_unmap_single(host->dma_dev, host->data_dma,
-                                 sizeof(*host->data), DMA_BIDIRECTIONAL);
-fail_data_dma:
-        if (host->dma_dev)
-                dma_unmap_single(host->dma_dev, host->ones_dma,
-                                 MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
-fail_ones_dma:
+        mmc_spi_dma_free(host);
+fail_dma:
         kfree(host->data);
 fail_nobuf1:
         mmc_free_host(mmc);
         mmc_spi_put_pdata(spi);
 nomem:
         kfree(ones);
         return status;

@@ -1489,13 +1513,7 @@ static int mmc_spi_remove(struct spi_device *spi)

         mmc_remove_host(mmc);

-        if (host->dma_dev) {
-                dma_unmap_single(host->dma_dev, host->ones_dma,
-                                 MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
-                dma_unmap_single(host->dma_dev, host->data_dma,
-                                 sizeof(*host->data), DMA_BIDIRECTIONAL);
-        }
+        mmc_spi_dma_free(host);

         kfree(host->data);
         kfree(host->ones);
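Note on the mmc_spi.c change: the new helpers only factor the existing streaming-DMA setup out of probe()/remove() so it can be compiled out. A minimal sketch of the map/sync/unmap pairing they wrap, using the standard dma_map_single() API (the example_* names are placeholders, not driver code):

#include <linux/dma-mapping.h>

/* Map a driver-owned buffer for device access; hypothetical helper. */
static int example_map_buffer(struct device *dev, void *buf, size_t len,
                              dma_addr_t *handle)
{
        *handle = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *handle))
                return -ENOMEM;

        /* Hand ownership back to the CPU before it touches the buffer. */
        dma_sync_single_for_cpu(dev, *handle, len, DMA_BIDIRECTIONAL);
        return 0;
}

/* Every successful map must eventually be balanced by an unmap. */
static void example_unmap_buffer(struct device *dev, dma_addr_t handle,
                                 size_t len)
{
        dma_unmap_single(dev, handle, len, DMA_BIDIRECTIONAL);
}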
drivers/mmc/host/sdhci-acpi.c (+24 −7)

@@ -551,12 +551,18 @@ static int amd_select_drive_strength(struct mmc_card *card,
         return MMC_SET_DRIVER_TYPE_A;
 }

-static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host)
+static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host, bool enable)
 {
+        struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
+        struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
+
         /* AMD Platform requires dll setting */
         sdhci_writel(host, 0x40003210, SDHCI_AMD_RESET_DLL_REGISTER);
         usleep_range(10, 20);
-        sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER);
+        if (enable)
+                sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER);
+
+        amd_host->dll_enabled = enable;
 }

 /*

@@ -596,10 +602,8 @@ static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)

                 /* DLL is only required for HS400 */
                 if (host->timing == MMC_TIMING_MMC_HS400 &&
-                    !amd_host->dll_enabled) {
-                        sdhci_acpi_amd_hs400_dll(host);
-                        amd_host->dll_enabled = true;
-                }
+                    !amd_host->dll_enabled)
+                        sdhci_acpi_amd_hs400_dll(host, true);
         }
 }

@@ -620,10 +624,23 @@ static int amd_sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
         return err;
 }

+static void amd_sdhci_reset(struct sdhci_host *host, u8 mask)
+{
+        struct sdhci_acpi_host *acpi_host = sdhci_priv(host);
+        struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host);
+
+        if (mask & SDHCI_RESET_ALL) {
+                amd_host->tuned_clock = false;
+                sdhci_acpi_amd_hs400_dll(host, false);
+        }
+
+        sdhci_reset(host, mask);
+}
+
 static const struct sdhci_ops sdhci_acpi_ops_amd = {
         .set_clock      = sdhci_set_clock,
         .set_bus_width  = sdhci_set_bus_width,
-        .reset          = sdhci_reset,
+        .reset          = amd_sdhci_reset,
         .set_uhs_signaling = sdhci_set_uhs_signaling,
 };
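Note on the sdhci-acpi.c change: a reset with SDHCI_RESET_ALL wipes the controller's tuning and DLL state, so the cached tuned_clock/dll_enabled flags would otherwise go stale. amd_sdhci_reset() clears the cached tuning flag and calls sdhci_acpi_amd_hs400_dll(host, false), which now resets the DLL without re-enabling it and records the new state, before chaining to the generic sdhci_reset(); the ops table accordingly points .reset at this wrapper instead of the generic helper.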
drivers/mmc/host/sdhci-msm.c (+17 −1)

@@ -1166,7 +1166,7 @@ static void sdhci_msm_set_cdr(struct sdhci_host *host, bool enable)
 static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
 {
         struct sdhci_host *host = mmc_priv(mmc);
-        int tuning_seq_cnt = 3;
+        int tuning_seq_cnt = 10;
         u8 phase, tuned_phases[16], tuned_phase_cnt = 0;
         int rc;
         struct mmc_ios ios = host->mmc->ios;

@@ -1222,6 +1222,22 @@ static int sdhci_msm_execute_tuning(struct mmc_host *mmc, u32 opcode)
         } while (++phase < ARRAY_SIZE(tuned_phases));

         if (tuned_phase_cnt) {
+                if (tuned_phase_cnt == ARRAY_SIZE(tuned_phases)) {
+                        /*
+                         * All phases valid is _almost_ as bad as no phases
+                         * valid.  Probably all phases are not really reliable
+                         * but we didn't detect where the unreliable place is.
+                         * That means we'll essentially be guessing and hoping
+                         * we get a good phase.  Better to try a few times.
+                         */
+                        dev_dbg(mmc_dev(mmc), "%s: All phases valid; try again\n",
+                                mmc_hostname(mmc));
+                        if (--tuning_seq_cnt) {
+                                tuned_phase_cnt = 0;
+                                goto retry;
+                        }
+                }
+
                 rc = msm_find_most_appropriate_phase(host, tuned_phases,
                                                      tuned_phase_cnt);
                 if (rc < 0)
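Note on the sdhci-msm.c change: when every one of the 16 sampling phases passes, the sweep has not actually located the unreliable region, so any single choice is a guess; the patch therefore discards the result and re-runs the sweep, with the retry budget raised from 3 to 10 attempts. A toy, userspace-only illustration of why an all-pass map is useless for a "middle of the longest good run" selection (the real driver uses msm_find_most_appropriate_phase(); pick_phase() below is a simplified stand-in, not the driver algorithm):

#include <stdbool.h>
#include <stdio.h>

#define NUM_PHASES 16

/* Return the middle of the longest contiguous run of good phases
 * (wrapping around), -1 if none passed, -2 if all passed (retry). */
static int pick_phase(const bool ok[NUM_PHASES])
{
        int best_start = -1, best_len = 0;

        for (int i = 0; i < NUM_PHASES; i++) {
                int len = 0;

                while (len < NUM_PHASES && ok[(i + len) % NUM_PHASES])
                        len++;

                if (len > best_len) {
                        best_len = len;
                        best_start = i;
                }
        }

        if (best_len == 0)
                return -1;              /* no phase worked: tuning failed   */
        if (best_len == NUM_PHASES)
                return -2;              /* everything "worked": sweep again */

        return (best_start + best_len / 2) % NUM_PHASES;
}

int main(void)
{
        bool some[NUM_PHASES] = { [3] = true, [4] = true, [5] = true, [6] = true };
        bool all[NUM_PHASES];

        for (int i = 0; i < NUM_PHASES; i++)
                all[i] = true;

        printf("partial map picks phase %d\n", pick_phase(some)); /* prints 5 */
        printf("all-pass map returns %d\n", pick_phase(all));     /* prints -2 */
        return 0;
}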