aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/mmc
diff options
context:
space:
mode:
authorcodeworkx <daniel.hillenbrand@codeworkx.de>2012-06-02 13:09:29 +0200
committercodeworkx <daniel.hillenbrand@codeworkx.de>2012-06-02 13:09:29 +0200
commitc6da2cfeb05178a11c6d062a06f8078150ee492f (patch)
treef3b4021d252c52d6463a9b3c1bb7245e399b009c /drivers/mmc
parentc6d7c4dbff353eac7919342ae6b3299a378160a6 (diff)
downloadkernel_samsung_smdk4412-c6da2cfeb05178a11c6d062a06f8078150ee492f.zip
kernel_samsung_smdk4412-c6da2cfeb05178a11c6d062a06f8078150ee492f.tar.gz
kernel_samsung_smdk4412-c6da2cfeb05178a11c6d062a06f8078150ee492f.tar.bz2
samsung update 1
Diffstat (limited to 'drivers/mmc')
-rw-r--r--drivers/mmc/card/Kconfig14
-rw-r--r--drivers/mmc/card/Makefile8
-rw-r--r--drivers/mmc/card/block.c1582
-rw-r--r--drivers/mmc/card/cprmdrv_samsung.c450
-rw-r--r--drivers/mmc/card/cprmdrv_samsung.h75
-rw-r--r--drivers/mmc/card/queue.c264
-rw-r--r--drivers/mmc/card/queue.h46
-rw-r--r--drivers/mmc/core/Kconfig30
-rw-r--r--drivers/mmc/core/bus.c29
-rw-r--r--drivers/mmc/core/core.c824
-rw-r--r--drivers/mmc/core/core.h3
-rw-r--r--drivers/mmc/core/debugfs.c12
-rw-r--r--drivers/mmc/core/host.c71
-rw-r--r--drivers/mmc/core/host.h21
-rw-r--r--drivers/mmc/core/mmc.c559
-rw-r--r--drivers/mmc/core/mmc_ops.c42
-rw-r--r--drivers/mmc/core/mmc_ops.h1
-rw-r--r--drivers/mmc/core/quirks.c25
-rw-r--r--drivers/mmc/core/sd.c208
-rw-r--r--drivers/mmc/core/sdio.c520
-rw-r--r--drivers/mmc/core/sdio_bus.c13
-rwxr-xr-x[-rw-r--r--]drivers/mmc/core/sdio_io.c36
-rw-r--r--drivers/mmc/core/sdio_irq.c10
-rw-r--r--drivers/mmc/host/Kconfig30
-rw-r--r--drivers/mmc/host/Makefile5
-rw-r--r--drivers/mmc/host/dw_mmc.c803
-rw-r--r--drivers/mmc/host/dw_mmc.h20
-rw-r--r--drivers/mmc/host/mshci-s3c-dma.c220
-rw-r--r--drivers/mmc/host/mshci-s3c.c631
-rw-r--r--drivers/mmc/host/mshci.c2248
-rw-r--r--drivers/mmc/host/mshci.h463
-rw-r--r--drivers/mmc/host/sdhci-s3c.c228
-rw-r--r--drivers/mmc/host/sdhci.c233
-rw-r--r--drivers/mmc/host/sdhci.h3
34 files changed, 8993 insertions, 734 deletions
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 3b1f783..4283bc2 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -50,6 +50,15 @@ config MMC_BLOCK_BOUNCE
If unsure, say Y here.
+config MMC_BLOCK_DEFERRED_RESUME
+ bool "Defer MMC layer resume until I/O is requested"
+ depends on MMC_BLOCK
+ default n
+ help
+ Say Y here to enable deferred MMC resume until I/O
+ is requested. This will reduce overall resume latency and
+ save power when there's an SD card inserted but not being used.
+
config SDIO_UART
tristate "SDIO UART/GPS class support"
help
@@ -67,3 +76,8 @@ config MMC_TEST
This driver is only of interest to those developing or
testing a host driver. Most people should say N here.
+
+config MMC_SELECTIVE_PACKED_CMD_POLICY
+ tristate "Change the condition of Packed command"
+ help
+ Say Y here to change packed_cmd policy
diff --git a/drivers/mmc/card/Makefile b/drivers/mmc/card/Makefile
index c73b406..a6efd4d 100644
--- a/drivers/mmc/card/Makefile
+++ b/drivers/mmc/card/Makefile
@@ -1,10 +1,16 @@
#
# Makefile for MMC/SD card drivers
#
+ifeq ($(CONFIG_MMC_CPRM),y)
+EXTRA_CFLAGS += -I$(src)/cprm/softcprm
+EXTRA_CFLAGS += -I$(src)/cprm/include
+endif
obj-$(CONFIG_MMC_BLOCK) += mmc_block.o
mmc_block-objs := block.o queue.o
+ifeq ($(CONFIG_MMC_CPRM),y)
+mmc_block-objs += cprmdrv_samsung.o
+endif
obj-$(CONFIG_MMC_TEST) += mmc_test.o
obj-$(CONFIG_SDIO_UART) += sdio_uart.o
-
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index f85e422..e505132 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -45,8 +45,35 @@
#include <asm/uaccess.h>
#include "queue.h"
+#include "../core/core.h"
MODULE_ALIAS("mmc:block");
+
+#if defined(CONFIG_MMC_CPRM)
+#define MMC_ENABLE_CPRM
+#endif
+
+#ifdef MMC_ENABLE_CPRM
+#include "cprmdrv_samsung.h"
+#include <linux/ioctl.h>
+#define MMC_IOCTL_BASE 0xB3 /* Same as MMC block device major number */
+#define MMC_IOCTL_GET_SECTOR_COUNT _IOR(MMC_IOCTL_BASE, 100, int)
+#define MMC_IOCTL_GET_SECTOR_SIZE _IOR(MMC_IOCTL_BASE, 101, int)
+#define MMC_IOCTL_GET_BLOCK_SIZE _IOR(MMC_IOCTL_BASE, 102, int)
+#endif
+
+#ifdef MOVI_DEBUG
+struct CMD_LOG {
+ u32 cmd;
+ u32 arg;
+ u32 cnt;
+ u32 rsp;
+ u32 stoprsp;
+};
+struct CMD_LOG gaCmdLog[5];
+int gnCmdLogIdx;
+#endif
+
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
@@ -59,6 +86,13 @@ MODULE_ALIAS("mmc:block");
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
+#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \
+ (req->cmd_flags & REQ_META)) && \
+ (rq_data_dir(req) == WRITE))
+#define PACKED_CMD_VER 0x01
+#define PACKED_CMD_RD 0x01
+#define PACKED_CMD_WR 0x02
+
static DEFINE_MUTEX(block_mutex);
/*
@@ -94,6 +128,12 @@ struct mmc_blk_data {
unsigned int read_only;
unsigned int part_type;
unsigned int name_idx;
+ unsigned int reset_done;
+#define MMC_BLK_READ BIT(0)
+#define MMC_BLK_WRITE BIT(1)
+#define MMC_BLK_DISCARD BIT(2)
+#define MMC_BLK_SECDISCARD BIT(3)
+#define MMC_BLK_WR_HDR BIT(4)
/*
* Only set in main mmc_blk_data associated
@@ -106,6 +146,23 @@ struct mmc_blk_data {
static DEFINE_MUTEX(open_lock);
+enum mmc_blk_status {
+ MMC_BLK_SUCCESS = 0,
+ MMC_BLK_PARTIAL,
+ MMC_BLK_CMD_ERR,
+ MMC_BLK_RETRY,
+ MMC_BLK_ABORT,
+ MMC_BLK_DATA_ERR,
+ MMC_BLK_ECC_ERR,
+ MMC_BLK_NOMEDIUM,
+};
+
+enum {
+ MMC_PACKED_N_IDX = -1,
+ MMC_PACKED_N_ZERO,
+ MMC_PACKED_N_SINGLE,
+};
+
module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
@@ -126,11 +183,7 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
static inline int mmc_get_devidx(struct gendisk *disk)
{
- int devmaj = MAJOR(disk_devt(disk));
- int devidx = MINOR(disk_devt(disk)) / perdev_minors;
-
- if (!devmaj)
- devidx = disk->first_minor / perdev_minors;
+ int devidx = disk->first_minor / perdev_minors;
return devidx;
}
@@ -283,7 +336,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
struct mmc_data data = {0};
struct mmc_request mrq = {0};
struct scatterlist sg;
- int err;
+ int err = 0;
/*
* The caller must have CAP_SYS_RAWIO, and must be calling this on the
@@ -393,7 +446,8 @@ cmd_rel_host:
mmc_release_host(card->host);
cmd_done:
- mmc_blk_put(md);
+ if (md)
+ mmc_blk_put(md);
kfree(idata->buf);
kfree(idata);
return err;
@@ -402,9 +456,45 @@ cmd_done:
static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
+#ifdef MMC_ENABLE_CPRM
+ struct mmc_blk_data *md = bdev->bd_disk->private_data;
+ struct mmc_card *card = md->queue.card;
+#endif
int ret = -EINVAL;
if (cmd == MMC_IOC_CMD)
ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
+
+#ifdef MMC_ENABLE_CPRM
+ printk(KERN_DEBUG " %s ], %x ", __func__, cmd);
+
+ switch (cmd) {
+ case MMC_IOCTL_GET_SECTOR_COUNT: {
+ int size = 0;
+
+ size = (int)get_capacity(md->disk) << 9;
+ printk(KERN_DEBUG "[%s]:MMC_IOCTL_GET_SECTOR_COUNT size = %d\n",
+ __func__, size);
+
+ return copy_to_user((void *)arg, &size, sizeof(u64));
+ }
+ break;
+ case ACMD13:
+ case ACMD18:
+ case ACMD25:
+ case ACMD43:
+ case ACMD44:
+ case ACMD45:
+ case ACMD46:
+ case ACMD47:
+ case ACMD48: {
+ struct cprm_request *req = (struct cprm_request *)arg;
+
+ printk(KERN_DEBUG "[%s]: cmd [%x]\n", __func__, cmd);
+ return stub_sendcmd(card, req->cmd, req->arg, \
+ req->len, req->buff);
+ }
+ }
+#endif
return ret;
}
@@ -427,14 +517,6 @@ static const struct block_device_operations mmc_bdops = {
#endif
};
-struct mmc_blk_request {
- struct mmc_request mrq;
- struct mmc_command sbc;
- struct mmc_command cmd;
- struct mmc_command stop;
- struct mmc_data data;
-};
-
static inline int mmc_blk_part_switch(struct mmc_card *card,
struct mmc_blk_data *md)
{
@@ -452,7 +534,7 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
card->ext_csd.part_time);
if (ret)
return ret;
-}
+ }
main_md->part_curr = md->part_type;
return 0;
@@ -491,8 +573,15 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
data.timeout_clks = card->csd.tacc_clks * 100;
timeout_us = data.timeout_ns / 1000;
- timeout_us += data.timeout_clks * 1000 /
- (card->host->ios.clock / 1000);
+ if (card->host->ios.clock) {
+ /* original */
+ timeout_us += data.timeout_clks * 1000 /
+ (card->host->ios.clock / 1000);
+ } else {
+ /* if clock is 0, assume ios.clock is 50000000(working clock) */
+ timeout_us += data.timeout_clks * 1000 /
+ (50000000 / 1000);
+ }
if (timeout_us > 100000) {
data.timeout_ns = 100000000;
@@ -525,7 +614,20 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
return result;
}
-static u32 get_card_status(struct mmc_card *card, struct request *req)
+static int send_stop(struct mmc_card *card, u32 *status)
+{
+ struct mmc_command cmd = {0};
+ int err;
+
+ cmd.opcode = MMC_STOP_TRANSMISSION;
+ cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ err = mmc_wait_for_cmd(card->host, &cmd, 5);
+ if (err == 0)
+ *status = cmd.resp[0];
+ return err;
+}
+
+static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
struct mmc_command cmd = {0};
int err;
@@ -534,11 +636,198 @@ static u32 get_card_status(struct mmc_card *card, struct request *req)
if (!mmc_host_is_spi(card->host))
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
- err = mmc_wait_for_cmd(card->host, &cmd, 0);
- if (err)
- printk(KERN_ERR "%s: error %d sending status command",
- req->rq_disk->disk_name, err);
- return cmd.resp[0];
+ err = mmc_wait_for_cmd(card->host, &cmd, retries);
+ if (err == 0)
+ *status = cmd.resp[0];
+ return err;
+}
+
+#define ERR_NOMEDIUM 3
+#define ERR_RETRY 2
+#define ERR_ABORT 1
+#define ERR_CONTINUE 0
+
+static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
+ bool status_valid, u32 status)
+{
+ switch (error) {
+ case -EILSEQ:
+ /* response crc error, retry the r/w cmd */
+ pr_err("%s: %s sending %s command, card status %#x\n",
+ req->rq_disk->disk_name, "response CRC error",
+ name, status);
+ return ERR_RETRY;
+
+ case -ETIMEDOUT:
+ pr_err("%s: %s sending %s command, card status %#x\n",
+ req->rq_disk->disk_name, "timed out", name, status);
+
+ /* If the status cmd initially failed, retry the r/w cmd */
+ if (!status_valid) {
+ pr_err("%s: status not valid, retrying timeout\n", req->rq_disk->disk_name);
+ return ERR_RETRY;
+ }
+ /*
+ * If it was a r/w cmd crc error, or illegal command
+ * (eg, issued in wrong state) then retry - we should
+ * have corrected the state problem above.
+ */
+ if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) {
+ pr_err("%s: command error, retrying timeout\n", req->rq_disk->disk_name);
+ return ERR_RETRY;
+ }
+
+ /* Otherwise abort the command */
+ pr_err("%s: not retrying timeout\n", req->rq_disk->disk_name);
+ return ERR_ABORT;
+
+ default:
+ /* We don't understand the error code the driver gave us */
+ pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
+ req->rq_disk->disk_name, error, status);
+ return ERR_ABORT;
+ }
+}
+
+/*
+ * Initial r/w and stop cmd error recovery.
+ * We don't know whether the card received the r/w cmd or not, so try to
+ * restore things back to a sane state. Essentially, we do this as follows:
+ * - Obtain card status. If the first attempt to obtain card status fails,
+ * the status word will reflect the failed status cmd, not the failed
+ * r/w cmd. If we fail to obtain card status, it suggests we can no
+ * longer communicate with the card.
+ * - Check the card state. If the card received the cmd but there was a
+ * transient problem with the response, it might still be in a data transfer
+ * mode. Try to send it a stop command. If this fails, we can't recover.
+ * - If the r/w cmd failed due to a response CRC error, it was probably
+ * transient, so retry the cmd.
+ * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
+ * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
+ * illegal cmd, retry.
+ * Otherwise we don't understand what happened, so abort.
+ */
+static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
+ struct mmc_blk_request *brq, int *ecc_err)
+{
+ bool prev_cmd_status_valid = true;
+ u32 status, stop_status = 0;
+ int err, retry;
+
+ if (mmc_card_removed(card))
+ return ERR_NOMEDIUM;
+
+ /*
+ * Try to get card status which indicates both the card state
+ * and why there was no response. If the first attempt fails,
+ * we can't be sure the returned status is for the r/w command.
+ */
+ for (retry = 2; retry >= 0; retry--) {
+ err = get_card_status(card, &status, 0);
+ if (!err)
+ break;
+
+ prev_cmd_status_valid = false;
+ pr_err("%s: error %d sending status command, %sing\n",
+ req->rq_disk->disk_name, err, retry ? "retry" : "abort");
+ }
+
+ /* We couldn't get a response from the card. Give up. */
+ if (err) {
+ /* Check if the card is removed */
+ if (mmc_detect_card_removed(card->host))
+ return ERR_NOMEDIUM;
+ return ERR_ABORT;
+ }
+
+ /* Flag ECC errors */
+ if ((status & R1_CARD_ECC_FAILED) ||
+ (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
+ (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
+ *ecc_err = 1;
+
+ /*
+ * Check the current card state. If it is in some data transfer
+ * mode, tell it to stop (and hopefully transition back to TRAN.)
+ */
+ if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
+ R1_CURRENT_STATE(status) == R1_STATE_RCV) {
+ err = send_stop(card, &stop_status);
+ if (err)
+ pr_err("%s: error %d sending stop command\n",
+ req->rq_disk->disk_name, err);
+
+ /*
+ * If the stop cmd also timed out, the card is probably
+ * not present, so abort. Other errors are bad news too.
+ */
+ if (err)
+ return ERR_ABORT;
+ if (stop_status & R1_CARD_ECC_FAILED)
+ *ecc_err = 1;
+ }
+
+ /* Check for set block count errors */
+ if (brq->sbc.error)
+ return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
+ prev_cmd_status_valid, status);
+
+ /* Check for r/w command errors */
+ if (brq->cmd.error)
+ return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
+ prev_cmd_status_valid, status);
+
+ /* Data errors */
+ if (!brq->stop.error)
+ return ERR_CONTINUE;
+
+ /* Now for stop errors. These aren't fatal to the transfer. */
+ pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
+ req->rq_disk->disk_name, brq->stop.error,
+ brq->cmd.resp[0], status);
+
+ /*
+ * Substitute in our own stop status as this will give the error
+ * state which happened during the execution of the r/w command.
+ */
+ if (stop_status) {
+ brq->stop.resp[0] = stop_status;
+ brq->stop.error = 0;
+ }
+ return ERR_CONTINUE;
+}
+
+static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
+ int type)
+{
+ int err;
+
+ if (md->reset_done & type)
+ return -EEXIST;
+
+ md->reset_done |= type;
+ err = mmc_hw_reset(host);
+ /* Ensure we switch back to the correct partition */
+ if (err != -EOPNOTSUPP) {
+ struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
+ int part_err;
+
+ main_md->part_curr = main_md->part_type;
+ part_err = mmc_blk_part_switch(host->card, md);
+ if (part_err) {
+ /*
+ * We have failed to get back into the correct
+ * partition, so we need to abort the whole request.
+ */
+ return -ENODEV;
+ }
+ }
+ return err;
+}
+
+static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
+{
+ md->reset_done &= ~type;
}
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
@@ -546,7 +835,7 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
unsigned int from, nr, arg;
- int err = 0;
+ int err = 0, type = MMC_BLK_DISCARD;
if (!mmc_can_erase(card)) {
err = -EOPNOTSUPP;
@@ -556,11 +845,13 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
- if (mmc_can_trim(card))
+ if (mmc_can_discard(card))
+ arg = MMC_DISCARD_ARG;
+ else if (mmc_can_trim(card))
arg = MMC_TRIM_ARG;
else
arg = MMC_ERASE_ARG;
-
+retry:
if (card->quirks & MMC_QUIRK_INAND_CMD38) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
INAND_CMD38_ARG_EXT_CSD,
@@ -573,6 +864,10 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
}
err = mmc_erase(card, from, nr, arg);
out:
+ if (err == -EIO && !mmc_blk_reset(md, card->host, type))
+ goto retry;
+ if (!err)
+ mmc_blk_reset_success(md, type);
spin_lock_irq(&md->lock);
__blk_end_request(req, err, blk_rq_bytes(req));
spin_unlock_irq(&md->lock);
@@ -586,13 +881,20 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
unsigned int from, nr, arg;
- int err = 0;
+ int err = 0, type = MMC_BLK_SECDISCARD;
- if (!mmc_can_secure_erase_trim(card)) {
+ if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
err = -EOPNOTSUPP;
goto out;
}
+ /* The sanitize operation is supported at v4.5 only */
+ if (mmc_can_sanitize(card)) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_SANITIZE_START, 1, 0);
+ goto out;
+ }
+
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
@@ -600,7 +902,7 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
arg = MMC_SECURE_TRIM1_ARG;
else
arg = MMC_SECURE_ERASE_ARG;
-
+retry:
if (card->quirks & MMC_QUIRK_INAND_CMD38) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
INAND_CMD38_ARG_EXT_CSD,
@@ -624,6 +926,10 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
}
out:
+ if (err == -EIO && !mmc_blk_reset(md, card->host, type))
+ goto retry;
+ if (!err)
+ mmc_blk_reset_success(md, type);
spin_lock_irq(&md->lock);
__blk_end_request(req, err, blk_rq_bytes(req));
spin_unlock_irq(&md->lock);
@@ -634,16 +940,18 @@ out:
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ int ret = 0;
+
+ ret = mmc_flush_cache(card);
+ if (ret)
+ ret = -EIO;
- /*
- * No-op, only service this because we need REQ_FUA for reliable
- * writes.
- */
spin_lock_irq(&md->lock);
- __blk_end_request_all(req, 0);
+ __blk_end_request_all(req, ret);
spin_unlock_irq(&md->lock);
- return 1;
+ return ret ? 0 : 1;
}
/*
@@ -669,12 +977,197 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
}
}
-static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+#define CMD_ERRORS \
+ (R1_OUT_OF_RANGE | /* Command argument out of range */ \
+ R1_ADDRESS_ERROR | /* Misaligned address */ \
+ R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
+ R1_WP_VIOLATION | /* Tried to write to protected block */ \
+ R1_CC_ERROR | /* Card controller error */ \
+ R1_ERROR) /* General/unknown error */
+
+static int mmc_blk_err_check(struct mmc_card *card,
+ struct mmc_async_req *areq)
+{
+ struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
+ mmc_active);
+ struct mmc_blk_request *brq = &mq_mrq->brq;
+ struct request *req = mq_mrq->req;
+ int ecc_err = 0;
+
+ /*
+ * sbc.error indicates a problem with the set block count
+ * command. No data will have been transferred.
+ *
+ * cmd.error indicates a problem with the r/w command. No
+ * data will have been transferred.
+ *
+ * stop.error indicates a problem with the stop command. Data
+ * may have been transferred, or may still be transferring.
+ */
+ if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
+ brq->data.error) {
+#if defined(CONFIG_MACH_M0) || defined(CONFIG_MACH_P4NOTE) || \
+ defined(CONFIG_MACH_C1_USA_ATT)
+ /* dh0421.hwang */
+ if (mmc_card_mmc(card)) {
+ printk(KERN_ERR "[TEST] brq->sbc.opcode=%d,"
+ "brq->cmd.opcode=%d.\n",
+ brq->sbc.opcode, brq->cmd.opcode);
+ printk(KERN_ERR "[TEST] brq->sbc.error=%d,"
+ "brq->cmd.error=%d, brq->stop.error=%d,"
+ "brq->data.error=%d.\n", brq->sbc.error,
+ brq->cmd.error, brq->stop.error,
+ brq->data.error);
+ }
+#endif
+ switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
+ case ERR_RETRY:
+ return MMC_BLK_RETRY;
+ case ERR_ABORT:
+ return MMC_BLK_ABORT;
+ case ERR_NOMEDIUM:
+ return MMC_BLK_NOMEDIUM;
+ case ERR_CONTINUE:
+ break;
+ }
+ }
+
+ /*
+ * Check for errors relating to the execution of the
+ * initial command - such as address errors. No data
+ * has been transferred.
+ */
+ if (brq->cmd.resp[0] & CMD_ERRORS) {
+ pr_err("%s: r/w command failed, status = %#x\n",
+ req->rq_disk->disk_name, brq->cmd.resp[0]);
+ return MMC_BLK_ABORT;
+ }
+
+ /*
+ * Everything else is either success, or a data error of some
+ * kind. If it was a write, we may have transitioned to
+ * program mode, which we have to wait for it to complete.
+ */
+ if ((!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) ||
+ (mq_mrq->packed_cmd == MMC_PACKED_WR_HDR)) {
+ u32 status;
+ /* timeout value set 0x30000 : It works just SDcard case.
+ * It means send CMD sequencially about 7.8sec.
+ * If SDcard's data line stays low, timeout is about 4sec.
+ * max timeout is up to 300ms
+ */
+ u32 timeout = 0x30000;
+ do {
+ int err = get_card_status(card, &status, 5);
+ if (err) {
+ printk(KERN_ERR "%s: error %d requesting status\n",
+ req->rq_disk->disk_name, err);
+ return MMC_BLK_CMD_ERR;
+ }
+ /*
+ * Some cards mishandle the status bits,
+ * so make sure to check both the busy
+ * indication and the card state.
+ */
+ /* Just SDcard case, decrease timeout */
+ if (mmc_card_sd(card))
+ timeout--;
+ } while ((!(status & R1_READY_FOR_DATA) ||
+ (R1_CURRENT_STATE(status) == R1_STATE_PRG)) &&
+ timeout);
+
+ /* If SDcard stays busy status, timeout is to be zero */
+ if (!timeout) {
+ pr_err("%s: card state has been never changed "
+ "to trans.!\n",
+ req->rq_disk->disk_name);
+ return MMC_BLK_DATA_ERR;
+ }
+ }
+
+ if (brq->data.error) {
+ pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
+ req->rq_disk->disk_name, brq->data.error,
+ (unsigned)blk_rq_pos(req),
+ (unsigned)blk_rq_sectors(req),
+ brq->cmd.resp[0], brq->stop.resp[0]);
+
+ if (rq_data_dir(req) == READ &&
+ mq_mrq->packed_cmd != MMC_PACKED_WR_HDR) {
+ if (ecc_err)
+ return MMC_BLK_ECC_ERR;
+ return MMC_BLK_DATA_ERR;
+ } else {
+ return MMC_BLK_CMD_ERR;
+ }
+ }
+
+ if (!brq->data.bytes_xfered)
+ return MMC_BLK_RETRY;
+
+ if (mq_mrq->packed_cmd != MMC_PACKED_NONE) {
+ if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
+ return MMC_BLK_PARTIAL;
+ else
+ return MMC_BLK_SUCCESS;
+ }
+
+ if (blk_rq_bytes(req) != brq->data.bytes_xfered)
+ return MMC_BLK_PARTIAL;
+
+ return MMC_BLK_SUCCESS;
+}
+
+static int mmc_blk_packed_err_check(struct mmc_card *card,
+ struct mmc_async_req *areq)
+{
+ struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
+ mmc_active);
+ struct request *req = mq_rq->req;
+ int err, check, status;
+ u8 ext_csd[512];
+
+ check = mmc_blk_err_check(card, areq);
+ err = get_card_status(card, &status, 0);
+ if (err) {
+ pr_err("%s: error %d sending status command\n",
+ req->rq_disk->disk_name, err);
+ return MMC_BLK_ABORT;
+ }
+
+ if (status & R1_EXP_EVENT) {
+ err = mmc_send_ext_csd(card, ext_csd);
+ if (err) {
+ pr_err("%s: error %d sending ext_csd\n",
+ req->rq_disk->disk_name, err);
+ return MMC_BLK_ABORT;
+ }
+
+ if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
+ EXT_CSD_PACKED_FAILURE) &&
+ (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+ EXT_CSD_PACKED_GENERIC_ERROR)) {
+ if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+ EXT_CSD_PACKED_INDEXED_ERROR) {
+ mq_rq->packed_fail_idx =
+ ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
+ return MMC_BLK_PARTIAL;
+ }
+ }
+ }
+
+ return check;
+}
+
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+ struct mmc_card *card,
+ int disable_multi,
+ struct mmc_queue *mq)
{
+ u32 readcmd, writecmd;
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct request *req = mqrq->req;
struct mmc_blk_data *md = mq->data;
- struct mmc_card *card = md->queue.card;
- struct mmc_blk_request brq;
- int ret = 1, disable_multi = 0;
/*
* Reliable writes are used to implement Forced Unit Access and
@@ -685,233 +1178,370 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
(rq_data_dir(req) == WRITE) &&
(md->flags & MMC_BLK_REL_WR);
- do {
- struct mmc_command cmd = {0};
- u32 readcmd, writecmd, status = 0;
-
- memset(&brq, 0, sizeof(struct mmc_blk_request));
- brq.mrq.cmd = &brq.cmd;
- brq.mrq.data = &brq.data;
-
- brq.cmd.arg = blk_rq_pos(req);
- if (!mmc_card_blockaddr(card))
- brq.cmd.arg <<= 9;
- brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
- brq.data.blksz = 512;
- brq.stop.opcode = MMC_STOP_TRANSMISSION;
- brq.stop.arg = 0;
- brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- brq.data.blocks = blk_rq_sectors(req);
-
- /*
- * The block layer doesn't support all sector count
- * restrictions, so we need to be prepared for too big
- * requests.
- */
- if (brq.data.blocks > card->host->max_blk_count)
- brq.data.blocks = card->host->max_blk_count;
+ memset(brq, 0, sizeof(struct mmc_blk_request));
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.data = &brq->data;
- /*
- * After a read error, we redo the request one sector at a time
- * in order to accurately determine which sectors can be read
- * successfully.
- */
- if (disable_multi && brq.data.blocks > 1)
- brq.data.blocks = 1;
+ brq->cmd.arg = blk_rq_pos(req);
+ if (!mmc_card_blockaddr(card))
+ brq->cmd.arg <<= 9;
+ brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+ brq->data.blksz = 512;
+ brq->stop.opcode = MMC_STOP_TRANSMISSION;
+ brq->stop.arg = 0;
+ brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ brq->data.blocks = blk_rq_sectors(req);
- if (brq.data.blocks > 1 || do_rel_wr) {
- /* SPI multiblock writes terminate using a special
- * token, not a STOP_TRANSMISSION request.
- */
- if (!mmc_host_is_spi(card->host) ||
- rq_data_dir(req) == READ)
- brq.mrq.stop = &brq.stop;
- readcmd = MMC_READ_MULTIPLE_BLOCK;
- writecmd = MMC_WRITE_MULTIPLE_BLOCK;
- } else {
- brq.mrq.stop = NULL;
- readcmd = MMC_READ_SINGLE_BLOCK;
- writecmd = MMC_WRITE_BLOCK;
- }
- if (rq_data_dir(req) == READ) {
- brq.cmd.opcode = readcmd;
- brq.data.flags |= MMC_DATA_READ;
- } else {
- brq.cmd.opcode = writecmd;
- brq.data.flags |= MMC_DATA_WRITE;
- }
+ /*
+ * The block layer doesn't support all sector count
+ * restrictions, so we need to be prepared for too big
+ * requests.
+ */
+ if (brq->data.blocks > card->host->max_blk_count)
+ brq->data.blocks = card->host->max_blk_count;
- if (do_rel_wr)
- mmc_apply_rel_rw(&brq, card, req);
+ /*
+ * After a read error, we redo the request one sector at a time
+ * in order to accurately determine which sectors can be read
+ * successfully.
+ */
+ if (disable_multi && brq->data.blocks > 1)
+ brq->data.blocks = 1;
- /*
- * Pre-defined multi-block transfers are preferable to
- * open ended-ones (and necessary for reliable writes).
- * However, it is not sufficient to just send CMD23,
- * and avoid the final CMD12, as on an error condition
- * CMD12 (stop) needs to be sent anyway. This, coupled
- * with Auto-CMD23 enhancements provided by some
- * hosts, means that the complexity of dealing
- * with this is best left to the host. If CMD23 is
- * supported by card and host, we'll fill sbc in and let
- * the host deal with handling it correctly. This means
- * that for hosts that don't expose MMC_CAP_CMD23, no
- * change of behavior will be observed.
- *
- * N.B: Some MMC cards experience perf degradation.
- * We'll avoid using CMD23-bounded multiblock writes for
- * these, while retaining features like reliable writes.
+ if (brq->data.blocks > 1 || do_rel_wr) {
+ /* SPI multiblock writes terminate using a special
+ * token, not a STOP_TRANSMISSION request.
*/
+ if (!mmc_host_is_spi(card->host) ||
+ rq_data_dir(req) == READ)
+ brq->mrq.stop = &brq->stop;
+ readcmd = MMC_READ_MULTIPLE_BLOCK;
+ writecmd = MMC_WRITE_MULTIPLE_BLOCK;
+ } else {
+ brq->mrq.stop = NULL;
+ readcmd = MMC_READ_SINGLE_BLOCK;
+ writecmd = MMC_WRITE_BLOCK;
+ }
+ if (rq_data_dir(req) == READ) {
+ brq->cmd.opcode = readcmd;
+ brq->data.flags |= MMC_DATA_READ;
+ } else {
+ brq->cmd.opcode = writecmd;
+ brq->data.flags |= MMC_DATA_WRITE;
+ }
- if ((md->flags & MMC_BLK_CMD23) &&
- mmc_op_multi(brq.cmd.opcode) &&
- (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
- brq.sbc.opcode = MMC_SET_BLOCK_COUNT;
- brq.sbc.arg = brq.data.blocks |
- (do_rel_wr ? (1 << 31) : 0);
- brq.sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
- brq.mrq.sbc = &brq.sbc;
- }
+ if (do_rel_wr)
+ mmc_apply_rel_rw(brq, card, req);
- mmc_set_data_timeout(&brq.data, card);
+ /*
+ * Pre-defined multi-block transfers are preferable to
+ * open ended-ones (and necessary for reliable writes).
+ * However, it is not sufficient to just send CMD23,
+ * and avoid the final CMD12, as on an error condition
+ * CMD12 (stop) needs to be sent anyway. This, coupled
+ * with Auto-CMD23 enhancements provided by some
+ * hosts, means that the complexity of dealing
+ * with this is best left to the host. If CMD23 is
+ * supported by card and host, we'll fill sbc in and let
+ * the host deal with handling it correctly. This means
+ * that for hosts that don't expose MMC_CAP_CMD23, no
+ * change of behavior will be observed.
+ *
+ * N.B: Some MMC cards experience perf degradation.
+ * We'll avoid using CMD23-bounded multiblock writes for
+ * these, while retaining features like reliable writes.
+ */
- brq.data.sg = mq->sg;
- brq.data.sg_len = mmc_queue_map_sg(mq);
+ if ((md->flags & MMC_BLK_CMD23) &&
+ mmc_op_multi(brq->cmd.opcode) &&
+ (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
+ brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+ brq->sbc.arg = brq->data.blocks |
+ (do_rel_wr ? (1 << 31) : 0);
+ brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ brq->mrq.sbc = &brq->sbc;
+ }
- /*
- * Adjust the sg list so it is the same size as the
- * request.
- */
- if (brq.data.blocks != blk_rq_sectors(req)) {
- int i, data_size = brq.data.blocks << 9;
- struct scatterlist *sg;
-
- for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
- data_size -= sg->length;
- if (data_size <= 0) {
- sg->length += data_size;
- i++;
- break;
- }
+ mmc_set_data_timeout(&brq->data, card);
+
+ brq->data.sg = mqrq->sg;
+ brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+ /*
+ * Adjust the sg list so it is the same size as the
+ * request.
+ */
+ if (brq->data.blocks != blk_rq_sectors(req)) {
+ int i, data_size = brq->data.blocks << 9;
+ struct scatterlist *sg;
+
+ for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
+ data_size -= sg->length;
+ if (data_size <= 0) {
+ sg->length += data_size;
+ i++;
+ break;
}
- brq.data.sg_len = i;
}
+ brq->data.sg_len = i;
+ }
- mmc_queue_bounce_pre(mq);
+ mqrq->mmc_active.mrq = &brq->mrq;
+ mqrq->mmc_active.err_check = mmc_blk_err_check;
- mmc_wait_for_req(card->host, &brq.mrq);
+ mmc_queue_bounce_pre(mqrq);
+}
- mmc_queue_bounce_post(mq);
+static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
+{
+ struct request_queue *q = mq->queue;
+ struct mmc_card *card = mq->card;
+ struct request *cur = req, *next = NULL;
+ struct mmc_blk_data *md = mq->data;
+ bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
+ unsigned int req_sectors = 0, phys_segments = 0;
+ unsigned int max_blk_count, max_phys_segs;
+ u8 put_back = 0;
+ u8 max_packed_rw = 0;
+ u8 reqs = 0;
+
+ mq->mqrq_cur->packed_num = MMC_PACKED_N_ZERO;
+
+ if (!(md->flags & MMC_BLK_CMD23) ||
+ !card->ext_csd.packed_event_en)
+ goto no_packed;
+
+ if (rq_data_dir(cur) == READ &&
+ (card->host->caps2 & MMC_CAP2_PACKED_RD))
+ max_packed_rw = card->ext_csd.max_packed_reads;
+ else if ((rq_data_dir(cur) == WRITE) &&
+ (card->host->caps2 & MMC_CAP2_PACKED_WR))
+ max_packed_rw = card->ext_csd.max_packed_writes;
+
+ if (max_packed_rw == 0)
+ goto no_packed;
+
+#ifdef CONFIG_MMC_SELECTIVE_PACKED_CMD_POLICY
+ if (rq_data_dir(cur) == READ)
+ goto no_packed;
+#endif
- /*
- * Check for errors here, but don't jump to cmd_err
- * until later as we need to wait for the card to leave
- * programming mode even when things go wrong.
- */
- if (brq.sbc.error || brq.cmd.error ||
- brq.data.error || brq.stop.error) {
- if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
- /* Redo read one sector at a time */
- printk(KERN_WARNING "%s: retrying using single "
- "block read\n", req->rq_disk->disk_name);
- disable_multi = 1;
- continue;
- }
- status = get_card_status(card, req);
- }
+ if (mmc_req_rel_wr(cur) &&
+ (md->flags & MMC_BLK_REL_WR) &&
+ !en_rel_wr) {
+ goto no_packed;
+ }
+
+ max_blk_count = min(card->host->max_blk_count,
+ card->host->max_req_size >> 9);
+ if (unlikely(max_blk_count > 0xffff))
+ max_blk_count = 0xffff;
+
+ max_phys_segs = queue_max_segments(q);
+ req_sectors += blk_rq_sectors(cur);
+ phys_segments += req->nr_phys_segments;
- if (brq.sbc.error) {
- printk(KERN_ERR "%s: error %d sending SET_BLOCK_COUNT "
- "command, response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq.sbc.error,
- brq.sbc.resp[0], status);
+ if (rq_data_dir(cur) == WRITE) {
+ req_sectors++;
+ phys_segments++;
+ }
+
+ while (reqs < max_packed_rw - 1) {
+ spin_lock_irq(q->queue_lock);
+ next = blk_fetch_request(q);
+ spin_unlock_irq(q->queue_lock);
+ if (!next)
+ break;
+
+ if (next->cmd_flags & REQ_DISCARD ||
+ next->cmd_flags & REQ_FLUSH) {
+ put_back = 1;
+ break;
+ }
+#ifdef CONFIG_MMC_SELECTIVE_PACKED_CMD_POLICY
+ if ((blk_rq_pos(cur) + blk_rq_sectors(cur)) != \
+ blk_rq_pos(next)) {
+			/* if the next request does not start at the end
+			   block of the previous request */
+ put_back = 1;
+ break;
+ }
+#endif
+ if (rq_data_dir(cur) != rq_data_dir(next)) {
+ put_back = 1;
+ break;
}
- if (brq.cmd.error) {
- printk(KERN_ERR "%s: error %d sending read/write "
- "command, response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq.cmd.error,
- brq.cmd.resp[0], status);
+ if (mmc_req_rel_wr(next) &&
+ (md->flags & MMC_BLK_REL_WR) &&
+ !en_rel_wr) {
+ put_back = 1;
+ break;
}
- if (brq.data.error) {
- if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
- /* 'Stop' response contains card status */
- status = brq.mrq.stop->resp[0];
- printk(KERN_ERR "%s: error %d transferring data,"
- " sector %u, nr %u, card status %#x\n",
- req->rq_disk->disk_name, brq.data.error,
- (unsigned)blk_rq_pos(req),
- (unsigned)blk_rq_sectors(req), status);
+ req_sectors += blk_rq_sectors(next);
+ if (req_sectors > max_blk_count) {
+ put_back = 1;
+ break;
}
- if (brq.stop.error) {
- printk(KERN_ERR "%s: error %d sending stop command, "
- "response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq.stop.error,
- brq.stop.resp[0], status);
+ phys_segments += next->nr_phys_segments;
+ if (phys_segments > max_phys_segs) {
+ put_back = 1;
+ break;
}
- if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
- do {
- int err;
+ list_add_tail(&next->queuelist, &mq->mqrq_cur->packed_list);
+ cur = next;
+ reqs++;
+ }
- cmd.opcode = MMC_SEND_STATUS;
- cmd.arg = card->rca << 16;
- cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- err = mmc_wait_for_cmd(card->host, &cmd, 5);
- if (err) {
- printk(KERN_ERR "%s: error %d requesting status\n",
- req->rq_disk->disk_name, err);
- goto cmd_err;
- }
- /*
- * Some cards mishandle the status bits,
- * so make sure to check both the busy
- * indication and the card state.
- */
- } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
- (R1_CURRENT_STATE(cmd.resp[0]) == 7));
-
-#if 0
- if (cmd.resp[0] & ~0x00000900)
- printk(KERN_ERR "%s: status = %08x\n",
- req->rq_disk->disk_name, cmd.resp[0]);
- if (mmc_decode_status(cmd.resp))
- goto cmd_err;
-#endif
- }
+ if (put_back) {
+ spin_lock_irq(q->queue_lock);
+ blk_requeue_request(q, next);
+ spin_unlock_irq(q->queue_lock);
+ }
- if (brq.cmd.error || brq.stop.error || brq.data.error) {
- if (rq_data_dir(req) == READ) {
- /*
- * After an error, we redo I/O one sector at a
- * time, so we only reach here after trying to
- * read a single sector.
- */
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, -EIO, brq.data.blksz);
- spin_unlock_irq(&md->lock);
- continue;
- }
- goto cmd_err;
- }
+ if (reqs > 0) {
+ list_add(&req->queuelist, &mq->mqrq_cur->packed_list);
+ mq->mqrq_cur->packed_num = ++reqs;
+ return reqs;
+ }
- /*
- * A block was successfully transferred.
- */
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
- spin_unlock_irq(&md->lock);
- } while (ret);
+no_packed:
+ mq->mqrq_cur->packed_cmd = MMC_PACKED_NONE;
+ mq->mqrq_cur->packed_num = MMC_PACKED_N_ZERO;
+ return 0;
+}
- return 1;
+static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
+ struct mmc_card *card,
+ struct mmc_queue *mq)
+{
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct request *req = mqrq->req;
+ struct request *prq;
+ struct mmc_blk_data *md = mq->data;
+ bool do_rel_wr;
+ u32 *packed_cmd_hdr = mqrq->packed_cmd_hdr;
+ u8 i = 1;
- cmd_err:
- /*
- * If this is an SD card and we're writing, we can first
- * mark the known good sectors as ok.
- *
+ mqrq->packed_cmd = (rq_data_dir(req) == READ) ?
+ MMC_PACKED_WR_HDR : MMC_PACKED_WRITE;
+ mqrq->packed_blocks = 0;
+ mqrq->packed_fail_idx = MMC_PACKED_N_IDX;
+
+ memset(packed_cmd_hdr, 0, sizeof(mqrq->packed_cmd_hdr));
+ packed_cmd_hdr[0] = (mqrq->packed_num << 16) |
+ (((rq_data_dir(req) == READ) ?
+ PACKED_CMD_RD : PACKED_CMD_WR) << 8) |
+ PACKED_CMD_VER;
+
+ /*
+ * Argument for each entry of packed group
+ */
+ list_for_each_entry(prq, &mqrq->packed_list, queuelist) {
+ do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
+ /* Argument of CMD23*/
+ packed_cmd_hdr[(i * 2)] = (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
+ blk_rq_sectors(prq);
+ /* Argument of CMD18 or CMD25 */
+ packed_cmd_hdr[((i * 2)) + 1] = mmc_card_blockaddr(card) ?
+ blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+ mqrq->packed_blocks += blk_rq_sectors(prq);
+ i++;
+ }
+
+ memset(brq, 0, sizeof(struct mmc_blk_request));
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.data = &brq->data;
+ brq->mrq.sbc = &brq->sbc;
+ brq->mrq.stop = &brq->stop;
+
+ brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED |
+ ((rq_data_dir(req) == READ) ? 1 : mqrq->packed_blocks + 1);
+ brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
+ brq->cmd.arg = blk_rq_pos(req);
+ if (!mmc_card_blockaddr(card))
+ brq->cmd.arg <<= 9;
+ brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ brq->data.blksz = 512;
+ /*
+	 * Write the packed command header separately only for a packed read.
+ * In case of packed write, header is sent with blocks of data.
+ */
+ brq->data.blocks = (rq_data_dir(req) == READ) ?
+ 1 : mqrq->packed_blocks + 1;
+ brq->data.flags |= MMC_DATA_WRITE;
+
+ brq->stop.opcode = MMC_STOP_TRANSMISSION;
+ brq->stop.arg = 0;
+ brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+
+ mmc_set_data_timeout(&brq->data, card);
+
+ brq->data.sg = mqrq->sg;
+ brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+ mqrq->mmc_active.mrq = &brq->mrq;
+ mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+ mmc_queue_bounce_pre(mqrq);
+}
+
+static void mmc_blk_packed_rrq_prep(struct mmc_queue_req *mqrq,
+ struct mmc_card *card,
+ struct mmc_queue *mq)
+{
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct request *req = mqrq->req;
+
+ mqrq->packed_cmd = MMC_PACKED_READ;
+
+ memset(brq, 0, sizeof(struct mmc_blk_request));
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.data = &brq->data;
+ brq->mrq.stop = &brq->stop;
+
+ brq->cmd.opcode = MMC_READ_MULTIPLE_BLOCK;
+ brq->cmd.arg = blk_rq_pos(req);
+ if (!mmc_card_blockaddr(card))
+ brq->cmd.arg <<= 9;
+ brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+ brq->data.blksz = 512;
+ brq->data.blocks = mqrq->packed_blocks;
+ brq->data.flags |= MMC_DATA_READ;
+
+ brq->stop.opcode = MMC_STOP_TRANSMISSION;
+ brq->stop.arg = 0;
+ brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+
+ mmc_set_data_timeout(&brq->data, card);
+
+ brq->data.sg = mqrq->sg;
+ brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+ mqrq->mmc_active.mrq = &brq->mrq;
+ mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+ mmc_queue_bounce_pre(mqrq);
+}
+
+static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
+ struct mmc_blk_request *brq, struct request *req,
+ int ret)
+{
+ struct mmc_queue_req *mq_rq;
+ mq_rq = container_of(brq, struct mmc_queue_req, brq);
+
+ /*
+ * If this is an SD card and we're writing, we can first
+ * mark the known good sectors as ok.
+ *
* If the card is not SD, we can still ok written sectors
* as reported by the controller (which might be less than
* the real number of written sectors, but never more).
@@ -926,45 +1556,490 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
spin_unlock_irq(&md->lock);
}
} else {
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
- spin_unlock_irq(&md->lock);
+ if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
+ spin_lock_irq(&md->lock);
+ ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
+ spin_unlock_irq(&md->lock);
+ }
+ }
+ return ret;
+}
+
+static int mmc_blk_chk_hdr_err(struct mmc_queue *mq, int status)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ int type = MMC_BLK_WR_HDR, err = 0;
+
+ switch (status) {
+ case MMC_BLK_PARTIAL:
+ case MMC_BLK_RETRY:
+ err = 0;
+ break;
+ case MMC_BLK_CMD_ERR:
+ case MMC_BLK_ABORT:
+ case MMC_BLK_DATA_ERR:
+ case MMC_BLK_ECC_ERR:
+ err = mmc_blk_reset(md, card->host, type);
+ if (!err)
+ mmc_blk_reset_success(md, type);
+ break;
}
+ return err;
+}
+
+static int mmc_blk_issue_packed_rd(struct mmc_queue *mq,
+ struct mmc_queue_req *mq_rq)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ int status, ret = -EIO, retry = 2;
+
+ do {
+ mmc_start_req(card->host, NULL, (int *) &status);
+ if (status) {
+ ret = mmc_blk_chk_hdr_err(mq, status);
+ if (ret)
+ break;
+ mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
+ mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+ } else {
+ mmc_blk_packed_rrq_prep(mq_rq, card, mq);
+ mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+ ret = 0;
+ break;
+ }
+ } while (retry-- > 0);
+
+ return ret;
+}
+
+static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
+ int ret = 1, disable_multi = 0, retry = 0, type;
+ enum mmc_blk_status status;
+ struct mmc_queue_req *mq_rq;
+ struct request *req, *prq;
+ struct mmc_async_req *areq;
+ const u8 packed_num = 2;
+ u8 reqs = 0;
+#ifdef MOVI_DEBUG
+ gnCmdLogIdx = 0;
+#endif
+
+ if (!rqc && !mq->mqrq_prev->req)
+ return 0;
+
+ if (rqc)
+ reqs = mmc_blk_prep_packed_list(mq, rqc);
+
+ do {
+#ifdef MOVI_DEBUG
+ struct mmc_command cmd;
+#endif
+ if (rqc) {
+ if (reqs >= packed_num) {
+ mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur, card, mq);
+ }
+ else
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+ areq = &mq->mqrq_cur->mmc_active;
+ } else
+ areq = NULL;
+ areq = mmc_start_req(card->host, areq, (int *) &status);
+ if (!areq) {
+ if (mq->mqrq_cur->packed_cmd == MMC_PACKED_WR_HDR)
+ goto snd_packed_rd;
+ else
+ return 0;
+ }
+
+ mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
+ brq = &mq_rq->brq;
+ req = mq_rq->req;
+ type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+ mmc_queue_bounce_post(mq_rq);
+
+#ifdef MOVI_DEBUG
+ if (card->type == MMC_TYPE_MMC) {
+ gaCmdLog[gnCmdLogIdx].cmd = brq->cmd.opcode;
+ gaCmdLog[gnCmdLogIdx].arg = brq->cmd.arg;
+ gaCmdLog[gnCmdLogIdx].cnt = brq->data.blocks;
+ gaCmdLog[gnCmdLogIdx].rsp = brq->cmd.resp[0];
+ gaCmdLog[gnCmdLogIdx].stoprsp = brq->stop.resp[0];
+ gnCmdLogIdx++;
+
+ if (gnCmdLogIdx >= 5)
+ gnCmdLogIdx = 0;
+ }
+
+ if (brq->cmd.error) {
+ if (card->type == MMC_TYPE_MMC) {
+ get_card_status(card, &status, 0);
+ printk(KERN_ERR "[MOVI_DEBUG] card status is 0x%x\n",
+ status);
+ if (!status) {
+ int err, i, j;
+ for (i = 0; i < 5; i++) {
+ printk(KERN_ERR "[CMD LOG] CMD:%d, ARG:0x%x, CNT:%d, RSP:0x%x, STRSP:0x%x\n",
+ gaCmdLog[gnCmdLogIdx].cmd,
+ gaCmdLog[gnCmdLogIdx].arg,
+ gaCmdLog[gnCmdLogIdx].cnt,
+ gaCmdLog[gnCmdLogIdx].rsp,
+ gaCmdLog[gnCmdLogIdx].stoprsp);
+ gnCmdLogIdx++;
+ if (gnCmdLogIdx >= 5)
+ gnCmdLogIdx = 0;
+ }
+
+ get_card_status(card, &status, 0);
+ printk(KERN_ERR "COMMAND13 response = 0x%x\n",
+ status);
+
+ cmd.opcode = 12;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R1;
+ err = mmc_wait_for_cmd
+ (card->host, &cmd, 0);
+ if (err) {
+ printk(KERN_ERR "KERN_ERR %s: error %d CMD12\n",
+ req->rq_disk->disk_name, err);
+ }
+ printk(KERN_ERR "COMD12 RESP = 0x%x\n",
+ cmd.resp[0]);
+ msleep(100);
+
+ get_card_status(card, &status, 0);
+ printk(KERN_ERR "COMMAND13 response = 0x%x\n",
+ status);
+
+ mmc_set_clock(card->host, 400000);
+
+ for (i = 0; i < 3; i++) {
+ cmd.opcode = 1;
+ cmd.arg = 0x40ff8080;
+ cmd.flags = MMC_RSP_R3 |
+ MMC_CMD_BCR;
+ err = mmc_wait_for_cmd
+ (card->host, &cmd, 0);
+ if (err) {
+ printk(KERN_ERR "%s: error %d CMD1\n",
+ req->rq_disk->disk_name,
+ err);
+ }
+ printk(KERN_ERR "COMD1 RESP = 0x%x\n",
+ cmd.resp[0]);
+ msleep(50);
+ }
+
+ for (i = 0; i < 3; i++) {
+ cmd.opcode = 0;
+ cmd.arg = 0x20110210;
+ cmd.flags = MMC_RSP_NONE |
+ MMC_CMD_BC;
+ err = mmc_wait_for_cmd
+ (card->host, &cmd, 0);
+ if (err) {
+ printk(KERN_ERR "%s: error %d CMD0\n",
+ req->rq_disk->disk_name,
+ err);
+ }
+ msleep(50);
+ cmd.opcode = 0;
+ cmd.arg = 0x60FACC06;
+ cmd.flags = MMC_RSP_NONE |
+ MMC_CMD_BC;
+ err = mmc_wait_for_cmd
+ (card->host, &cmd, 0);
+ if (err) {
+ printk(KERN_ERR "%s: error %d CMD0\n",
+ req->rq_disk->disk_name,
+ err);
+ }
+ for (j = 0; j < 3; j++) {
+ msleep(50);
+ cmd.opcode = 1;
+ cmd.arg = 0x0;
+ cmd.flags = MMC_RSP_R3 |
+ MMC_CMD_BCR;
+ err = mmc_wait_for_cmd
+ (card->host, &cmd, 0);
+ if (err) {
+ printk(KERN_ERR "%s: error %d CMD1\n",
+ req->rq_disk->disk_name,
+ err);
+ }
+
+ printk(KERN_ERR "COMD1 RESP = 0x%x\n",
+ cmd.resp[0]);
+ }
+ }
+ panic("MOVINAND DEBUG PANIC\n");
+ }
+ }
+ }
+#endif
+
+ switch (status) {
+ case MMC_BLK_SUCCESS:
+ case MMC_BLK_PARTIAL:
+ /*
+ * A block was successfully transferred.
+ */
+ mmc_blk_reset_success(md, type);
+
+ if (mq_rq->packed_cmd != MMC_PACKED_NONE) {
+ int idx = mq_rq->packed_fail_idx, i = 0;
+ ret = 0;
+ while (!list_empty(&mq_rq->packed_list)) {
+ prq = list_entry_rq(mq_rq->packed_list.next);
+ if (idx == i) {
+ /* retry from error index */
+ mq_rq->packed_num -= idx;
+ mq_rq->req = prq;
+ ret = 1;
+ break;
+ }
+ list_del_init(&prq->queuelist);
+ spin_lock_irq(&md->lock);
+ __blk_end_request(prq, 0, blk_rq_bytes(prq));
+ spin_unlock_irq(&md->lock);
+ i++;
+ }
+ if (mq_rq->packed_num == MMC_PACKED_N_SINGLE) {
+ prq = list_entry_rq(mq_rq->packed_list.next);
+ list_del_init(&prq->queuelist);
+ mq_rq->packed_cmd = MMC_PACKED_NONE;
+ mq_rq->packed_num = MMC_PACKED_N_ZERO;
+ }
+ break;
+ } else {
+ spin_lock_irq(&md->lock);
+ ret = __blk_end_request(req, 0,
+ brq->data.bytes_xfered);
+ spin_unlock_irq(&md->lock);
+ }
+
+ /*
+ * If the blk_end_request function returns non-zero even
+ * though all data has been transferred and no errors
+ * were returned by the host controller, it's a bug.
+ */
+ if (status == MMC_BLK_SUCCESS && ret) {
+ printk(KERN_ERR "%s BUG rq_tot %d d_xfer %d\n",
+ __func__, blk_rq_bytes(req),
+ brq->data.bytes_xfered);
+ rqc = NULL;
+ goto cmd_abort;
+ }
+ break;
+ case MMC_BLK_CMD_ERR:
+ ret = mmc_blk_cmd_err(md, card, brq, req, ret);
+ if (!mmc_blk_reset(md, card->host, type))
+ break;
+ goto cmd_abort;
+ case MMC_BLK_RETRY:
+ if (retry++ < 5)
+ break;
+ /* Fall through */
+ case MMC_BLK_ABORT:
+ if (!mmc_blk_reset(md, card->host, type))
+ break;
+ goto cmd_abort;
+ case MMC_BLK_DATA_ERR: {
+ int err;
+
+ err = mmc_blk_reset(md, card->host, type);
+ if (!err)
+ break;
+ if (err == -ENODEV)
+ goto cmd_abort;
+ if (mq_rq->packed_cmd != MMC_PACKED_NONE)
+ break;
+ /* Fall through */
+ }
+ case MMC_BLK_ECC_ERR:
+ if (brq->data.blocks > 1) {
+ /* Redo read one sector at a time */
+ pr_warning("%s: retrying using single block read\n",
+ req->rq_disk->disk_name);
+ disable_multi = 1;
+ break;
+ }
+ /*
+ * After an error, we redo I/O one sector at a
+ * time, so we only reach here after trying to
+ * read a single sector.
+ */
+ spin_lock_irq(&md->lock);
+ ret = __blk_end_request(req, -EIO,
+ brq->data.blksz);
+ spin_unlock_irq(&md->lock);
+ if (!ret)
+ goto start_new_req;
+ break;
+ case MMC_BLK_NOMEDIUM:
+ goto cmd_abort;
+ }
+
+ if (ret) {
+ if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
+ /*
+ * In case of a incomplete request
+ * prepare it again and resend.
+ */
+ mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
+ mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+ } else {
+ mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
+ mmc_start_req(card->host, &mq_rq->mmc_active, NULL);
+ if (mq_rq->packed_cmd == MMC_PACKED_WR_HDR) {
+ if (mmc_blk_issue_packed_rd(mq, mq_rq))
+ goto cmd_abort;
+ }
+ }
+ }
+ } while (ret);
+
+snd_packed_rd:
+ if (mq->mqrq_cur->packed_cmd == MMC_PACKED_WR_HDR) {
+ if (mmc_blk_issue_packed_rd(mq, mq->mqrq_cur))
+ goto start_new_req;
+ }
+ return 1;
+
+ cmd_abort:
spin_lock_irq(&md->lock);
- while (ret)
- ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
+ if (mmc_card_removed(card))
+ req->cmd_flags |= REQ_QUIET;
spin_unlock_irq(&md->lock);
+ if (mq_rq->packed_cmd == MMC_PACKED_NONE) {
+ spin_lock_irq(&md->lock);
+ while (ret)
+ ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
+ spin_unlock_irq(&md->lock);
+ } else {
+ while (!list_empty(&mq_rq->packed_list)) {
+ prq = list_entry_rq(mq_rq->packed_list.next);
+ list_del_init(&prq->queuelist);
+ spin_lock_irq(&md->lock);
+ __blk_end_request(prq, -EIO, blk_rq_bytes(prq));
+ spin_unlock_irq(&md->lock);
+ }
+ }
+#if defined(CONFIG_MACH_M0) || defined(CONFIG_MACH_P4NOTE) || \
+ defined(CONFIG_MACH_C1_USA_ATT)
+ /*
+ * dh0421.hwang
+ * It's for Engineering DEBUGGING only
+ * This has to be removed before PVR(guessing)
+ * Please refer mshci reg dumps
+ */
+ if (mmc_card_mmc(card) && status != 3) {
+ printk(KERN_ERR "[TEST] CMD aborting case in"
+ "MMC's block layer ret %d.\n", ret);
+ printk(KERN_ERR "%s: CMD%d, ARG=0x%x.\n",
+ req->rq_disk->disk_name,
+ brq->cmd.opcode,
+ brq->cmd.arg);
+ printk(KERN_ERR "[TEST] If PACKED_NONE,"
+ "confirm end_request done\n");
+ printk(KERN_ERR "packed CMD type = %d.\n",
+ mq_rq ? mq_rq->packed_cmd : -1);
+ printk(KERN_ERR "[TEST] mmc%d, request returns %d.\n",
+ card->host->index, status);
+ printk(KERN_ERR "[TEST] err means...\n");
+ printk(KERN_ERR "\t1: MMC_BLK_PARTIAL.\n");
+ printk(KERN_ERR "\t2: MMC_BLK_CMD_ERR.\n");
+ printk(KERN_ERR "\t3: MMC_BLK_RETRY.\n");
+ printk(KERN_ERR "\t4: MMC_BLK_ABORT.\n");
+ printk(KERN_ERR "\t5: MMC_BLK_DATA_ERR.\n");
+ printk(KERN_ERR "\t6: MMC_BLK_ECC_ERR.\n");
+ if (!rqc) {
+ panic("[TEST] mmc%d, returns %d.\n",
+ card->host->index, status);
+ }
+ }
+#endif
+
+ start_new_req:
+ if (rqc) {
+ /*
+ * If current request is packed, it needs to put back.
+ */
+ if (mq->mqrq_cur->packed_cmd != MMC_PACKED_NONE) {
+ while (!list_empty(&mq->mqrq_cur->packed_list)) {
+ prq = list_entry_rq(mq->mqrq_cur->packed_list.prev);
+ if (prq->queuelist.prev != &mq->mqrq_cur->packed_list) {
+ list_del_init(&prq->queuelist);
+ spin_lock_irq(mq->queue->queue_lock);
+ blk_requeue_request(mq->queue, prq);
+ spin_unlock_irq(mq->queue->queue_lock);
+ } else {
+ list_del_init(&prq->queuelist);
+ }
+ }
+ mq->mqrq_cur->packed_cmd = MMC_PACKED_NONE;
+ mq->mqrq_cur->packed_num = MMC_PACKED_N_ZERO;
+ }
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+ mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL);
+ }
return 0;
}
+static int
+mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card);
+
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
int ret;
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
- mmc_claim_host(card->host);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ if (mmc_bus_needs_resume(card->host)) {
+ mmc_resume_bus(card->host);
+ mmc_blk_set_blksize(md, card);
+ }
+#endif
+
+ if (req && !mq->mqrq_prev->req)
+ /* claim host only for the first request */
+ mmc_claim_host(card->host);
+
ret = mmc_blk_part_switch(card, md);
if (ret) {
ret = 0;
goto out;
}
- if (req->cmd_flags & REQ_DISCARD) {
+ if (req && req->cmd_flags & REQ_DISCARD) {
+ /* complete ongoing async transfer before issuing discard */
+ if (card->host->areq)
+ mmc_blk_issue_rw_rq(mq, NULL);
if (req->cmd_flags & REQ_SECURE)
ret = mmc_blk_issue_secdiscard_rq(mq, req);
else
ret = mmc_blk_issue_discard_rq(mq, req);
- } else if (req->cmd_flags & REQ_FLUSH) {
+ } else if (req && req->cmd_flags & REQ_FLUSH) {
+ /* complete ongoing async transfer before issuing flush */
+ if (card->host->areq)
+ mmc_blk_issue_rw_rq(mq, NULL);
ret = mmc_blk_issue_flush(mq, req);
} else {
ret = mmc_blk_issue_rw_rq(mq, req);
}
out:
- mmc_release_host(card->host);
+ if (!req)
+ /* release host only when there are no more requests */
+ mmc_release_host(card->host);
return ret;
}
@@ -1038,6 +2113,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
md->disk->queue = md->queue.queue;
md->disk->driverfs_dev = parent;
set_disk_ro(md->disk, md->read_only || default_ro);
+ md->disk->flags = GENHD_FL_EXT_DEVT;
/*
* As discussed on lkml, GENHD_FL_REMOVABLE should:
@@ -1271,12 +2347,12 @@ static int mmc_blk_probe(struct mmc_card *card)
md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
cap_str, md->read_only ? "(ro)" : "");
- if (mmc_blk_alloc_parts(card, md))
- goto out;
-
mmc_set_drvdata(card, md);
mmc_fixup_device(card, blk_fixups);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 1);
+#endif
if (mmc_add_disk(md))
goto out;
@@ -1302,6 +2378,9 @@ static void mmc_blk_remove(struct mmc_card *card)
mmc_release_host(card->host);
mmc_blk_remove_req(md);
mmc_set_drvdata(card, NULL);
+#ifdef CONFIG_MMC_BLOCK_DEFERRED_RESUME
+ mmc_set_bus_resume_policy(card->host, 0);
+#endif
}
#ifdef CONFIG_PM
@@ -1325,7 +2404,9 @@ static int mmc_blk_resume(struct mmc_card *card)
struct mmc_blk_data *md = mmc_get_drvdata(card);
if (md) {
+#ifndef CONFIG_MMC_BLOCK_DEFERRED_RESUME
mmc_blk_set_blksize(md, card);
+#endif
/*
* Resume involves the card going into idle state,
@@ -1389,4 +2470,3 @@ module_exit(mmc_blk_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Multimedia Card (MMC) block device driver");
-
diff --git a/drivers/mmc/card/cprmdrv_samsung.c b/drivers/mmc/card/cprmdrv_samsung.c
new file mode 100644
index 0000000..6f64a7f
--- /dev/null
+++ b/drivers/mmc/card/cprmdrv_samsung.c
@@ -0,0 +1,450 @@
+
+#include <linux/mmc/core.h>
+#include <linux/mmc/card.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+
+
+#include <linux/scatterlist.h>
+#include <linux/uaccess.h>
+
+#include "cprmdrv_samsung.h"
+#include <linux/slab.h>
+
+
+static int mmc_wait_busy(struct mmc_card *card)
+{
+ int ret, busy;
+ struct mmc_command cmd;
+
+ busy = 0;
+ do {
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = MMC_SEND_STATUS;
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ ret = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (ret)
+ break;
+
+ if (!busy && !(cmd.resp[0] & R1_READY_FOR_DATA)) {
+ busy = 1;
+ printk(KERN_INFO "%s: Warning: Host did not "
+ "wait for busy state to end.\n",
+ mmc_hostname(card->host));
+ }
+ } while (!(cmd.resp[0] & R1_READY_FOR_DATA));
+
+ return ret;
+}
+
+static int CPRM_CMD_SecureRW(struct mmc_card *card,
+ unsigned int command,
+ unsigned int dir,
+ unsigned long arg,
+ unsigned char *buff,
+ unsigned int length) {
+
+ int err;
+ int i = 0;
+ struct mmc_request mrq;
+ struct mmc_command cmd;
+ struct mmc_command stop;
+ struct mmc_data data;
+ unsigned int timeout_us;
+
+ struct scatterlist sg;
+
+ if (command == SD_ACMD25_SECURE_WRITE_MULTI_BLOCK ||
+ command == SD_ACMD18_SECURE_READ_MULTI_BLOCK) {
+ return -EINVAL;
+ }
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = MMC_APP_CMD;
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (err)
+ return (u32)-1;
+
+ if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
+ return (u32)-1;
+
+ printk("CPRM_CMD_SecureRW: 1, command : %d\n", command);
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = command;
+
+ if (command == SD_ACMD43_GET_MKB)
+ cmd.arg = arg;
+ else
+ cmd.arg = 0;
+
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ memset(&data, 0, sizeof(struct mmc_data));
+
+ data.timeout_ns = card->csd.tacc_ns * 100;
+ data.timeout_clks = card->csd.tacc_clks * 100;
+
+ timeout_us = data.timeout_ns / 1000;
+ timeout_us += data.timeout_clks * 1000 /
+ (card->host->ios.clock / 1000);
+
+ if (timeout_us > 100000) {
+ data.timeout_ns = 100000000;
+ data.timeout_clks = 0;
+ }
+
+#if defined(CONFIG_TARGET_LOCALE_NTT)
+ data.timeout_ns = 100000000;
+ data.timeout_clks = 0;
+#endif
+
+ data.blksz = length;
+ data.blocks = 1;
+ data.flags = dir;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ stop.opcode = MMC_STOP_TRANSMISSION;
+ stop.arg = 0;
+ stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
+
+ memset(&mrq, 0, sizeof(struct mmc_request));
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+
+ if (data.blocks == 1)
+ mrq.stop = NULL;
+ else
+ mrq.stop = &stop;
+
+ printk(KERN_DEBUG "CPRM_CMD_SecureRW: 2\n");
+
+ sg_init_one(&sg, buff, length);
+
+ printk(KERN_DEBUG "CPRM_CMD_SecureRW: 3\n");
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ printk(KERN_DEBUG "CPRM_CMD_SecureRW: 4\n");
+
+ i = 0;
+ do {
+ printk(KERN_DEBUG "%x", buff[i++]);
+ if (i > 10)
+ break;
+ } while (i < length);
+ printk(KERN_DEBUG "\n");
+
+ if (cmd.error) {
+ printk(KERN_DEBUG "%s]cmd.error=%d\n ", __func__, cmd.error);
+ return cmd.error;
+ }
+
+ if (data.error) {
+ printk(KERN_DEBUG "%s]data.error=%d\n ", __func__, data.error);
+ return data.error;
+ }
+
+ err = mmc_wait_busy(card);
+ printk(KERN_DEBUG "CPRM_CMD_SecureRW: 5\n");
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int CPRM_CMD_SecureMultiRW(struct mmc_card *card,
+ unsigned int command,
+ unsigned int dir,
+ unsigned long arg,
+ unsigned char *buff,
+ unsigned int length) {
+
+ int err;
+
+ struct mmc_request mrq;
+ struct mmc_command cmd;
+ struct mmc_command stop;
+ struct mmc_data data;
+ unsigned int timeout_us;
+ unsigned long flags;
+
+ struct scatterlist sg;
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+ memset(&stop, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = MMC_APP_CMD;
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (err)
+ return (u32)-1;
+
+ if (!mmc_host_is_spi(card->host) && !(cmd.resp[0] & R1_APP_CMD))
+ return (u32)-1;
+
+ printk(KERN_DEBUG "CPRM_CMD_SecureRW: 1\n");
+
+ memset(&cmd, 0, sizeof(struct mmc_command));
+
+ cmd.opcode = command;
+
+ if (command == SD_ACMD43_GET_MKB)
+ cmd.arg = arg;
+ else
+ cmd.arg = 0;
+
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ memset(&data, 0, sizeof(struct mmc_data));
+
+ data.timeout_ns = card->csd.tacc_ns * 100;
+ data.timeout_clks = card->csd.tacc_clks * 100;
+
+ timeout_us = data.timeout_ns / 1000;
+ timeout_us += data.timeout_clks * 1000 /
+ (card->host->ios.clock / 1000);
+
+ if (timeout_us > 100000) {
+ data.timeout_ns = 100000000;
+ data.timeout_clks = 0;
+ }
+
+#if defined(CONFIG_TARGET_LOCALE_NTT)
+ data.timeout_ns = 100000000;
+ data.timeout_clks = 0;
+#endif
+
+ data.blksz = 512;
+ data.blocks = (length + 511) / 512;
+
+ data.flags = dir;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ stop.opcode = MMC_STOP_TRANSMISSION;
+ stop.arg = 0;
+ stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
+
+ memset(&mrq, 0, sizeof(struct mmc_request));
+
+ mrq.cmd = &cmd;
+ mrq.data = &data;
+ mrq.stop = &stop;
+
+
+ printk(KERN_DEBUG "CPRM_CMD_SecureRW: 2\n");
+
+ sg_init_one(&sg, buff, length);
+
+ if (dir == MMC_DATA_WRITE) {
+ local_irq_save(flags);
+ sg_copy_from_buffer(&sg, data.sg_len, buff, length);
+ local_irq_restore(flags);
+ }
+ printk(KERN_DEBUG "CPRM_CMD_SecureRW: 3\n");
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ printk(KERN_DEBUG "CPRM_CMD_SecureRW: 4\n");
+
+ if (cmd.error) {
+ printk(KERN_DEBUG "%s]cmd.error=%d\n", __func__, cmd.error);
+ return cmd.error;
+ }
+
+ if (data.error) {
+ printk(KERN_DEBUG "%s]data.error=%d\n", __func__, data.error);
+ return data.error;
+ }
+
+ err = mmc_wait_busy(card);
+ printk(KERN_DEBUG "CPRM_CMD_SecureRW: 5\n");
+
+ if (dir == MMC_DATA_READ) {
+ local_irq_save(flags);
+ sg_copy_to_buffer(&sg, data.sg_len, buff, length);
+ local_irq_restore(flags);
+ }
+
+ if (err)
+ return err;
+
+ return 0;
+}
+
+
+int stub_sendcmd(struct mmc_card *card,
+ unsigned int cmd,
+ unsigned long arg,
+ unsigned int len,
+ unsigned char *buff) {
+
+ int returnVal = -1;
+ unsigned char *kbuffer = NULL;
+ int direction = 0;
+ int result = 0;
+
+ if (card == NULL) {
+ printk(KERN_DEBUG "stub_sendcmd: card is null error\n");
+ return -ENXIO;
+ }
+
+ kbuffer = kmalloc(len, GFP_KERNEL);
+ if (kbuffer == NULL) {
+ printk(KERN_DEBUG "malloc failed\n");
+ return -ENOMEM;
+ }
+
+ memset(kbuffer, 0x00, len);
+
+ printk(KERN_DEBUG "%s]cmd=0x%x,len=%d\n ", __func__, cmd, len);
+
+ mmc_claim_host(card->host);
+
+ switch (cmd) {
+
+ case ACMD43:
+ direction = MMC_DATA_READ;
+ returnVal = CPRM_CMD_SecureRW(card,
+ SD_ACMD43_GET_MKB,
+ direction,
+ arg,
+ kbuffer,
+ len);
+
+ printk(KERN_DEBUG "SD_ACMD43_GET_MKB:0x%x\n", returnVal);
+ break;
+
+ case ACMD44:
+ direction = MMC_DATA_READ;
+ returnVal = CPRM_CMD_SecureRW(card,
+ SD_ACMD44_GET_MID,
+ direction,
+ 0,
+ kbuffer,
+ len);
+
+ printk(KERN_DEBUG "SD_ACMD44_GET_MID:0x%x\n", returnVal);
+ break;
+
+ case ACMD45:
+ direction = MMC_DATA_WRITE;
+ result = copy_from_user((void *)kbuffer, (void *)buff, len);
+ returnVal = CPRM_CMD_SecureRW(card,
+ SD_ACMD45_SET_CER_RN1,
+ direction,
+ 0,
+ kbuffer,
+ len);
+
+ printk(KERN_DEBUG "SD_ACMD45_SET_CER_RN1:0x%x\n",
+ returnVal);
+ break;
+
+ case ACMD46:
+ direction = MMC_DATA_READ;
+ returnVal = CPRM_CMD_SecureRW(card,
+ SD_ACMD46_GET_CER_RN2,
+ direction,
+ 0,
+ kbuffer,
+ len);
+
+ printk(KERN_DEBUG "SD_ACMD46_GET_CER_RN2:0x%x\n",
+ returnVal);
+ break;
+
+ case ACMD47:
+ direction = MMC_DATA_WRITE;
+ result = copy_from_user((void *)kbuffer, (void *)buff, len);
+ returnVal = CPRM_CMD_SecureRW(card,
+ SD_ACMD47_SET_CER_RES2,
+ direction,
+ 0,
+ kbuffer,
+ len);
+
+ printk(KERN_DEBUG "SD_ACMD47_SET_CER_RES2:0x%x\n",
+ returnVal);
+ break;
+
+ case ACMD48:
+ direction = MMC_DATA_READ;
+ returnVal = CPRM_CMD_SecureRW(card,
+ SD_ACMD48_GET_CER_RES1,
+ direction,
+ 0,
+ kbuffer,
+ len);
+
+ printk(KERN_DEBUG "SD_ACMD48_GET_CER_RES1:0x%x\n",
+ returnVal);
+ break;
+
+ case ACMD25:
+ direction = MMC_DATA_WRITE;
+ result = copy_from_user((void *)kbuffer, (void *)buff, len);
+ returnVal = CPRM_CMD_SecureMultiRW(card,
+ SD_ACMD25_SECURE_WRITE_MULTI_BLOCK,
+ direction,
+ 0,
+ kbuffer,
+ len);
+
+ printk(KERN_DEBUG "SD_ACMD25_SECURE_WRITE_MULTI_BLOCK[%d]=%d\n",
+ len, returnVal);
+ break;
+
+ case ACMD18:
+ direction = MMC_DATA_READ;
+ returnVal = CPRM_CMD_SecureMultiRW(card,
+ SD_ACMD18_SECURE_READ_MULTI_BLOCK,
+ direction,
+ 0,
+ kbuffer,
+ len);
+
+ printk(KERN_DEBUG "SD_ACMD18_SECURE_READ_MULTI_BLOCK [%d]=%d\n",
+ len, returnVal);
+ break;
+
+ case ACMD13:
+ break;
+
+ default:
+ printk(KERN_DEBUG " %s ] : CMD [ %x ] ERROR", __func__, cmd);
+ break;
+ }
+
+ if (returnVal == 0) {
+ if (direction == MMC_DATA_READ)
+ result = copy_to_user((void *)buff,
+ (void *)kbuffer,
+ len);
+
+ result = returnVal;
+ printk(KERN_DEBUG "stub_sendcmd SDAS_E_SUCCESS\n");
+ } else {
+ printk(KERN_DEBUG "stub_sendcmd SDAS_E_FAIL\n");
+ result = -EIO;
+ }
+
+ mmc_release_host(card->host);
+ kfree(kbuffer);
+ return result;
+}
diff --git a/drivers/mmc/card/cprmdrv_samsung.h b/drivers/mmc/card/cprmdrv_samsung.h
new file mode 100644
index 0000000..b07dd6e
--- /dev/null
+++ b/drivers/mmc/card/cprmdrv_samsung.h
@@ -0,0 +1,75 @@
+
+#ifndef __CPRM_API_SAMSUNG
+#define __CPRM_API_SAMSUNG
+
+#define SETRESP(x) (x << 11)
+#define GETRESP(x) ((x >> 11) & 0x0007)
+
+#define NORESP SETRESP(0) /* No response command */
+#define R1RESP SETRESP(1) /* r1 response command */
+#define R1BRESP SETRESP(2) /* r1b response command */
+#define R2RESP SETRESP(3) /* r2 response command */
+#define R3RESP SETRESP(4) /* r3 response command */
+#define R6RESP SETRESP(5) /* r6 response command */
+#define R7RESP SETRESP(6) /* r7 response command */
+
+#define DT 0x8000 /* With data */
+#define DIR_IN 0x0000 /* Data Transfer read */
+#define DIR_OUT 0x4000 /* Data Transfer write */
+#define ACMD 0x0400 /* Is ACMD */
+
+#define ACMD6 (6+R1RESP+ACMD) /* Set Bus Width(SD) */
+#define ACMD13 (13+R1RESP+ACMD+DT+DIR_IN) /* SD Status */
+#define ACMD18 (18+R1RESP+ACMD+DT+DIR_IN) /* Secure Read Multi Block */
+#define ACMD22 (22+R1RESP+ACMD+DT+DIR_IN) /* Send Number Write block */
+#define ACMD23 (23+R1RESP+ACMD) /* Set Write block Erase Count */
+#define ACMD25 (25+R1RESP+ACMD+DT+DIR_OUT) /* Secure Write Multiple Block */
+#define ACMD26 (26+R1RESP+ACMD+DT+DIR_OUT) /* Secure Write MKB */
+#define ACMD38 (38+R1BRESP+ACMD) /* Secure Erase */
+#define ACMD41 (41+R3RESP+ACMD) /* Send App Operating Condition */
+#define ACMD42 (42+R1RESP+ACMD) /* Set Clear Card Detect */
+#define ACMD43 (43+R1RESP+ACMD+DT+DIR_IN) /* Get MKB */
+#define ACMD44 (44+R1RESP+ACMD+DT+DIR_IN) /* Get MID */
+#define ACMD45 (45+R1RESP+ACMD+DT+DIR_OUT) /* Set CER RN1 */
+#define ACMD46 (46+R1RESP+ACMD+DT+DIR_IN) /* Get CER RN2 */
+#define ACMD47 (47+R1RESP+ACMD+DT+DIR_OUT) /* Set CER RES2 */
+#define ACMD48 (48+R1RESP+ACMD+DT+DIR_IN) /* Get CER RES1 */
+#define ACMD49 (49+R1BRESP+ACMD) /* Change Erase Area */
+#define ACMD51 (51+R1RESP+ACMD+DT+DIR_IN) /* Send SCR */
+
+/* Application-specific commands supported by all SD cards */
+enum SD_ACMD {
+SD_ACMD6_SET_BUS_WIDTH = 6,
+SD_ACMD13_SD_STATUS = 13,
+SD_ACMD18_SECURE_READ_MULTI_BLOCK = 18,
+SD_ACMD22_SEND_NUM_WR_BLOCKS = 22,
+SD_ACMD23_SET_WR_BLK_ERASE_COUNT = 23,
+SD_ACMD25_SECURE_WRITE_MULTI_BLOCK = 25,
+SD_ACMD26_SECURE_WRITE_MKB = 26,
+SD_ACMD38_SECURE_ERASE = 38,
+SD_ACMD41_SD_APP_OP_COND = 41,
+SD_ACMD42_SET_CLR_CARD_DETECT = 42,
+SD_ACMD43_GET_MKB = 43,
+SD_ACMD44_GET_MID = 44,
+SD_ACMD45_SET_CER_RN1 = 45,
+SD_ACMD46_GET_CER_RN2 = 46,
+SD_ACMD47_SET_CER_RES2 = 47,
+SD_ACMD48_GET_CER_RES1 = 48,
+SD_ACMD49_CHANGE_SECURE_AREA = 49,
+SD_ACMD51_SEND_SCR = 51
+};
+
+struct cprm_request {
+ unsigned int cmd;
+ unsigned long arg;
+ unsigned char *buff;
+ unsigned int len;
+};
+
+int stub_sendcmd(struct mmc_card *card,
+ unsigned int cmd,
+ unsigned long arg,
+ unsigned int len,
+ unsigned char *buff);
+
+#endif /* __CPRM_API_SAMSUNG */
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 6413afa..bf5d183 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -29,6 +29,8 @@
*/
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
+ struct mmc_queue *mq = q->queuedata;
+
/*
* We only like normal block requests and discards.
*/
@@ -37,6 +39,9 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
return BLKPREP_KILL;
}
+ if (mq && mmc_card_removed(mq->card))
+ return BLKPREP_KILL;
+
req->cmd_flags |= REQ_DONTPREP;
return BLKPREP_OK;
@@ -52,14 +57,18 @@ static int mmc_queue_thread(void *d)
down(&mq->thread_sem);
do {
struct request *req = NULL;
+ struct mmc_queue_req *tmp;
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
req = blk_fetch_request(q);
- mq->req = req;
+ mq->mqrq_cur->req = req;
spin_unlock_irq(q->queue_lock);
- if (!req) {
+ if (req || mq->mqrq_prev->req) {
+ set_current_state(TASK_RUNNING);
+ mq->issue_fn(mq, req);
+ } else {
if (kthread_should_stop()) {
set_current_state(TASK_RUNNING);
break;
@@ -67,11 +76,14 @@ static int mmc_queue_thread(void *d)
up(&mq->thread_sem);
schedule();
down(&mq->thread_sem);
- continue;
}
- set_current_state(TASK_RUNNING);
- mq->issue_fn(mq, req);
+ /* Current request becomes previous request and vice versa. */
+ mq->mqrq_prev->brq.mrq.data = NULL;
+ mq->mqrq_prev->req = NULL;
+ tmp = mq->mqrq_prev;
+ mq->mqrq_prev = mq->mqrq_cur;
+ mq->mqrq_cur = tmp;
} while (1);
up(&mq->thread_sem);
@@ -97,10 +109,46 @@ static void mmc_request(struct request_queue *q)
return;
}
- if (!mq->req)
+ if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
wake_up_process(mq->thread);
}
+static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
+{
+ struct scatterlist *sg;
+
+ sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
+ if (!sg)
+ *err = -ENOMEM;
+ else {
+ *err = 0;
+ sg_init_table(sg, sg_len);
+ }
+
+ return sg;
+}
+
+static void mmc_queue_setup_discard(struct request_queue *q,
+ struct mmc_card *card)
+{
+ unsigned max_discard;
+
+ max_discard = mmc_calc_max_discard(card);
+ if (!max_discard)
+ return;
+
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ q->limits.max_discard_sectors = max_discard;
+ if (card->erased_byte == 0)
+ q->limits.discard_zeroes_data = 1;
+ q->limits.discard_granularity = card->pref_erase << 9;
+ /* granularity must not be greater than max. discard */
+ if (card->pref_erase > max_discard)
+ q->limits.discard_granularity = 0;
+ if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
+ queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
+}
+
/**
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
@@ -116,6 +164,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
struct mmc_host *host = card->host;
u64 limit = BLK_BOUNCE_HIGH;
int ret;
+ struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+ struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
limit = *mmc_dev(host)->dma_mask;
@@ -125,21 +175,18 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (!mq->queue)
return -ENOMEM;
+ memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
+ memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
+ INIT_LIST_HEAD(&mqrq_cur->packed_list);
+ INIT_LIST_HEAD(&mqrq_prev->packed_list);
+ mq->mqrq_cur = mqrq_cur;
+ mq->mqrq_prev = mqrq_prev;
mq->queue->queuedata = mq;
- mq->req = NULL;
blk_queue_prep_rq(mq->queue, mmc_prep_request);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
- if (mmc_can_erase(card)) {
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
- mq->queue->limits.max_discard_sectors = UINT_MAX;
- if (card->erased_byte == 0)
- mq->queue->limits.discard_zeroes_data = 1;
- mq->queue->limits.discard_granularity = card->pref_erase << 9;
- if (mmc_can_secure_erase_trim(card))
- queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
- mq->queue);
- }
+ if (mmc_can_erase(card))
+ mmc_queue_setup_discard(mq->queue, card);
#ifdef CONFIG_MMC_BLOCK_BOUNCE
if (host->max_segs == 1) {
@@ -155,53 +202,64 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
bouncesz = host->max_blk_count * 512;
if (bouncesz > 512) {
- mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
- if (!mq->bounce_buf) {
+ mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+ if (!mqrq_cur->bounce_buf) {
printk(KERN_WARNING "%s: unable to "
- "allocate bounce buffer\n",
+ "allocate bounce cur buffer\n",
mmc_card_name(card));
}
+ mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+ if (!mqrq_prev->bounce_buf) {
+ printk(KERN_WARNING "%s: unable to "
+ "allocate bounce prev buffer\n",
+ mmc_card_name(card));
+ kfree(mqrq_cur->bounce_buf);
+ mqrq_cur->bounce_buf = NULL;
+ }
}
- if (mq->bounce_buf) {
+ if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
blk_queue_max_segments(mq->queue, bouncesz / 512);
blk_queue_max_segment_size(mq->queue, bouncesz);
- mq->sg = kmalloc(sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!mq->sg) {
- ret = -ENOMEM;
+ mqrq_cur->sg = mmc_alloc_sg(1, &ret);
+ if (ret)
goto cleanup_queue;
- }
- sg_init_table(mq->sg, 1);
- mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
- bouncesz / 512, GFP_KERNEL);
- if (!mq->bounce_sg) {
- ret = -ENOMEM;
+ mqrq_cur->bounce_sg =
+ mmc_alloc_sg(bouncesz / 512, &ret);
+ if (ret)
+ goto cleanup_queue;
+
+ mqrq_prev->sg = mmc_alloc_sg(1, &ret);
+ if (ret)
+ goto cleanup_queue;
+
+ mqrq_prev->bounce_sg =
+ mmc_alloc_sg(bouncesz / 512, &ret);
+ if (ret)
goto cleanup_queue;
- }
- sg_init_table(mq->bounce_sg, bouncesz / 512);
}
}
#endif
- if (!mq->bounce_buf) {
+ if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
blk_queue_bounce_limit(mq->queue, limit);
blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
blk_queue_max_segments(mq->queue, host->max_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
- mq->sg = kmalloc(sizeof(struct scatterlist) *
- host->max_segs, GFP_KERNEL);
- if (!mq->sg) {
- ret = -ENOMEM;
+ mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
+ if (ret)
+ goto cleanup_queue;
+
+
+ mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
+ if (ret)
goto cleanup_queue;
- }
- sg_init_table(mq->sg, host->max_segs);
}
sema_init(&mq->thread_sem, 1);
@@ -216,16 +274,22 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
return 0;
free_bounce_sg:
- if (mq->bounce_sg)
- kfree(mq->bounce_sg);
- mq->bounce_sg = NULL;
+ kfree(mqrq_cur->bounce_sg);
+ mqrq_cur->bounce_sg = NULL;
+ kfree(mqrq_prev->bounce_sg);
+ mqrq_prev->bounce_sg = NULL;
+
cleanup_queue:
- if (mq->sg)
- kfree(mq->sg);
- mq->sg = NULL;
- if (mq->bounce_buf)
- kfree(mq->bounce_buf);
- mq->bounce_buf = NULL;
+ kfree(mqrq_cur->sg);
+ mqrq_cur->sg = NULL;
+ kfree(mqrq_cur->bounce_buf);
+ mqrq_cur->bounce_buf = NULL;
+
+ kfree(mqrq_prev->sg);
+ mqrq_prev->sg = NULL;
+ kfree(mqrq_prev->bounce_buf);
+ mqrq_prev->bounce_buf = NULL;
+
blk_cleanup_queue(mq->queue);
return ret;
}
@@ -234,6 +298,8 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
{
struct request_queue *q = mq->queue;
unsigned long flags;
+ struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+ struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
/* Make sure the queue isn't suspended, as that will deadlock */
mmc_queue_resume(mq);
@@ -247,16 +313,23 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
blk_start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
- if (mq->bounce_sg)
- kfree(mq->bounce_sg);
- mq->bounce_sg = NULL;
+ kfree(mqrq_cur->bounce_sg);
+ mqrq_cur->bounce_sg = NULL;
+
+ kfree(mqrq_cur->sg);
+ mqrq_cur->sg = NULL;
+
+ kfree(mqrq_cur->bounce_buf);
+ mqrq_cur->bounce_buf = NULL;
- kfree(mq->sg);
- mq->sg = NULL;
+ kfree(mqrq_prev->bounce_sg);
+ mqrq_prev->bounce_sg = NULL;
- if (mq->bounce_buf)
- kfree(mq->bounce_buf);
- mq->bounce_buf = NULL;
+ kfree(mqrq_prev->sg);
+ mqrq_prev->sg = NULL;
+
+ kfree(mqrq_prev->bounce_buf);
+ mqrq_prev->bounce_buf = NULL;
mq->card = NULL;
}
@@ -306,30 +379,70 @@ void mmc_queue_resume(struct mmc_queue *mq)
}
}
+static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
+ struct mmc_queue_req *mqrq,
+ struct scatterlist *sg)
+{
+ struct scatterlist *__sg;
+ unsigned int sg_len = 0;
+ struct request *req;
+ enum mmc_packed_cmd cmd;
+
+ cmd = mqrq->packed_cmd;
+
+ if (cmd == MMC_PACKED_WR_HDR || cmd == MMC_PACKED_WRITE) {
+ __sg = sg;
+ sg_set_buf(__sg, mqrq->packed_cmd_hdr,
+ sizeof(mqrq->packed_cmd_hdr));
+ sg_len++;
+ if (cmd == MMC_PACKED_WR_HDR) {
+ sg_mark_end(__sg);
+ return sg_len;
+ }
+ __sg->page_link &= ~0x02;
+ }
+
+ __sg = sg + sg_len;
+ list_for_each_entry(req, &mqrq->packed_list, queuelist) {
+ sg_len += blk_rq_map_sg(mq->queue, req, __sg);
+ __sg = sg + (sg_len - 1);
+ (__sg++)->page_link &= ~0x02;
+ }
+ sg_mark_end(sg + (sg_len - 1));
+ return sg_len;
+}
+
/*
* Prepare the sg list(s) to be handed of to the host driver
*/
-unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
+unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
unsigned int sg_len;
size_t buflen;
struct scatterlist *sg;
int i;
- if (!mq->bounce_buf)
- return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
+ if (!mqrq->bounce_buf) {
+ if (!list_empty(&mqrq->packed_list))
+ return mmc_queue_packed_map_sg(mq, mqrq, mqrq->sg);
+ else
+ return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+ }
- BUG_ON(!mq->bounce_sg);
+ BUG_ON(!mqrq->bounce_sg);
- sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);
+ if (!list_empty(&mqrq->packed_list))
+ sg_len = mmc_queue_packed_map_sg(mq, mqrq, mqrq->bounce_sg);
+ else
+ sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
- mq->bounce_sg_len = sg_len;
+ mqrq->bounce_sg_len = sg_len;
buflen = 0;
- for_each_sg(mq->bounce_sg, sg, sg_len, i)
+ for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
buflen += sg->length;
- sg_init_one(mq->sg, mq->bounce_buf, buflen);
+ sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
return 1;
}
@@ -338,31 +451,30 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
* If writing, bounce the data to the buffer before the request
* is sent to the host driver
*/
-void mmc_queue_bounce_pre(struct mmc_queue *mq)
+void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
- if (!mq->bounce_buf)
+ if (!mqrq->bounce_buf)
return;
- if (rq_data_dir(mq->req) != WRITE)
+ if (rq_data_dir(mqrq->req) != WRITE)
return;
- sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
- mq->bounce_buf, mq->sg[0].length);
+ sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+ mqrq->bounce_buf, mqrq->sg[0].length);
}
/*
* If reading, bounce the data from the buffer after the request
* has been handled by the host driver
*/
-void mmc_queue_bounce_post(struct mmc_queue *mq)
+void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
- if (!mq->bounce_buf)
+ if (!mqrq->bounce_buf)
return;
- if (rq_data_dir(mq->req) != READ)
+ if (rq_data_dir(mqrq->req) != READ)
return;
- sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
- mq->bounce_buf, mq->sg[0].length);
+ sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+ mqrq->bounce_buf, mqrq->sg[0].length);
}
-
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 6223ef8..be58b3c 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -4,19 +4,48 @@
struct request;
struct task_struct;
+struct mmc_blk_request {
+ struct mmc_request mrq;
+ struct mmc_command sbc;
+ struct mmc_command cmd;
+ struct mmc_command stop;
+ struct mmc_data data;
+};
+
+enum mmc_packed_cmd {
+ MMC_PACKED_NONE = 0,
+ MMC_PACKED_WR_HDR,
+ MMC_PACKED_WRITE,
+ MMC_PACKED_READ,
+};
+
+struct mmc_queue_req {
+ struct request *req;
+ struct mmc_blk_request brq;
+ struct scatterlist *sg;
+ char *bounce_buf;
+ struct scatterlist *bounce_sg;
+ unsigned int bounce_sg_len;
+ struct mmc_async_req mmc_active;
+ struct list_head packed_list;
+ u32 packed_cmd_hdr[128];
+ unsigned int packed_blocks;
+ enum mmc_packed_cmd packed_cmd;
+ int packed_fail_idx;
+ u8 packed_num;
+};
+
struct mmc_queue {
struct mmc_card *card;
struct task_struct *thread;
struct semaphore thread_sem;
unsigned int flags;
- struct request *req;
int (*issue_fn)(struct mmc_queue *, struct request *);
void *data;
struct request_queue *queue;
- struct scatterlist *sg;
- char *bounce_buf;
- struct scatterlist *bounce_sg;
- unsigned int bounce_sg_len;
+ struct mmc_queue_req mqrq[2];
+ struct mmc_queue_req *mqrq_cur;
+ struct mmc_queue_req *mqrq_prev;
};
extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
@@ -25,8 +54,9 @@ extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);
-extern unsigned int mmc_queue_map_sg(struct mmc_queue *);
-extern void mmc_queue_bounce_pre(struct mmc_queue *);
-extern void mmc_queue_bounce_post(struct mmc_queue *);
+extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
+ struct mmc_queue_req *);
+extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
+extern void mmc_queue_bounce_post(struct mmc_queue_req *);
#endif
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index ef10387..cc83b0a 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -27,3 +27,33 @@ config MMC_CLKGATE
support handling this in order for it to be of any use.
If unsure, say N.
+
+config MMC_EMBEDDED_SDIO
+ boolean "MMC embedded SDIO device support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ If you say Y here, support will be added for embedded SDIO
+ devices which do not contain the necessary enumeration
+ support in hardware to be properly detected.
+
+config MMC_PARANOID_SD_INIT
+ bool "Enable paranoid SD card initialization (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+ If you say Y here, the MMC layer will be extra paranoid
+ about re-trying SD init requests. This can be a useful
+ work-around for buggy controllers and hardware. Enable
+ if you are experiencing issues with SD detection.
+
+config MMC_NOT_USE_SANITIZE
+ bool "Disable SANITIZE emmc4.5 feature (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+	  If you say Y here, the sanitize feature will be disabled.
+
+config MMC_POLLING_WAIT_CMD23
+ bool "Wait for cmd23's done interrupt by polling check (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ help
+	  If you say Y here, cmd23 does not use interrupts.
+
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 393d817..eb7a4c8 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -120,18 +120,19 @@ static int mmc_bus_remove(struct device *dev)
return 0;
}
-static int mmc_bus_suspend(struct device *dev, pm_message_t state)
+static int mmc_bus_pm_suspend(struct device *dev)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
struct mmc_card *card = mmc_dev_to_card(dev);
int ret = 0;
+ pm_message_t state = { PM_EVENT_SUSPEND };
if (dev->driver && drv->suspend)
ret = drv->suspend(card, state);
return ret;
}
-static int mmc_bus_resume(struct device *dev)
+static int mmc_bus_pm_resume(struct device *dev)
{
struct mmc_driver *drv = to_mmc_driver(dev->driver);
struct mmc_card *card = mmc_dev_to_card(dev);
@@ -143,7 +144,6 @@ static int mmc_bus_resume(struct device *dev)
}
#ifdef CONFIG_PM_RUNTIME
-
static int mmc_runtime_suspend(struct device *dev)
{
struct mmc_card *card = mmc_dev_to_card(dev);
@@ -162,21 +162,13 @@ static int mmc_runtime_idle(struct device *dev)
{
return pm_runtime_suspend(dev);
}
+#endif /* CONFIG_PM_RUNTIME */
static const struct dev_pm_ops mmc_bus_pm_ops = {
- .runtime_suspend = mmc_runtime_suspend,
- .runtime_resume = mmc_runtime_resume,
- .runtime_idle = mmc_runtime_idle,
+ SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_pm_suspend, mmc_bus_pm_resume)
+ SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume, mmc_runtime_idle)
};
-#define MMC_PM_OPS_PTR (&mmc_bus_pm_ops)
-
-#else /* !CONFIG_PM_RUNTIME */
-
-#define MMC_PM_OPS_PTR NULL
-
-#endif /* !CONFIG_PM_RUNTIME */
-
static struct bus_type mmc_bus_type = {
.name = "mmc",
.dev_attrs = mmc_dev_attrs,
@@ -184,9 +176,7 @@ static struct bus_type mmc_bus_type = {
.uevent = mmc_bus_uevent,
.probe = mmc_bus_probe,
.remove = mmc_bus_remove,
- .suspend = mmc_bus_suspend,
- .resume = mmc_bus_resume,
- .pm = MMC_PM_OPS_PTR,
+ .pm = &mmc_bus_pm_ops,
};
int mmc_register_bus(void)
@@ -301,10 +291,11 @@ int mmc_add_card(struct mmc_card *card)
mmc_card_ddr_mode(card) ? "DDR " : "",
type);
} else {
- printk(KERN_INFO "%s: new %s%s%s card at address %04x\n",
+ pr_info("%s: new %s%s%s%s card at address %04x\n",
mmc_hostname(card->host),
- mmc_sd_card_uhs(card) ? "ultra high speed " :
+ mmc_card_uhs(card) ? "ultra high speed " :
(mmc_card_highspeed(card) ? "high speed " : ""),
+ (mmc_card_hs200(card) ? "HS200 " : ""),
mmc_card_ddr_mode(card) ? "DDR " : "",
type, card->rca);
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 75db30e6..8051835 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -23,6 +23,7 @@
#include <linux/log2.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
+#include <linux/wakelock.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -100,7 +101,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
cmd->retries = 0;
}
- if (err && cmd->retries) {
+ if (err && cmd->retries && !mmc_card_removed(host->card)) {
pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
mmc_hostname(host), cmd->opcode, err);
@@ -198,9 +199,219 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
static void mmc_wait_done(struct mmc_request *mrq)
{
- complete(mrq->done_data);
+ complete(&mrq->completion);
}
+static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
+{
+ init_completion(&mrq->completion);
+ mrq->done = mmc_wait_done;
+
+ if (mmc_card_removed(host->card)) {
+ mrq->cmd->error = -ENOMEDIUM;
+ complete(&mrq->completion);
+ return;
+ }
+
+#if (defined(CONFIG_MIDAS_COMMON) && !defined(CONFIG_EXYNOS4_DEV_DWMCI)) || \
+ defined(CONFIG_MACH_U1) || defined(CONFIG_MACH_SLP_NAPLES)
+#ifndef CONFIG_MMC_POLLING_WAIT_CMD23
+
+ if(mrq->sbc) {
+ struct mmc_request tmp_mrq;
+
+ memcpy(&tmp_mrq, mrq, sizeof(struct mmc_request));
+
+ /* send cmd 23 first */
+ mrq->cmd = mrq->sbc;
+ mrq->data = 0;
+ mmc_start_request(host, mrq);
+
+ /* wait for cmd 23 complete */
+ wait_for_completion(&mrq->completion);
+
+ /* check that cmd23 is done well */
+ if(mrq->cmd->error) {
+			/* there was an error while cmd23 was running */
+ mrq->sbc = mrq->cmd;
+ mrq->cmd = tmp_mrq.cmd;
+ mrq->data = tmp_mrq.data;
+ return;
+ }
+ /* send R/W command */
+ init_completion(&mrq->completion);
+ mrq->sbc = mrq->cmd;
+ mrq->cmd = tmp_mrq.cmd;
+ mrq->data = tmp_mrq.data;
+ mmc_start_request(host, mrq);
+ } else
+#endif
+#endif
+ mmc_start_request(host, mrq);
+}
+
+static inline void mmc_set_ios(struct mmc_host *host);
+static void mmc_power_up(struct mmc_host *host);
+static void mmc_wait_for_req_done(struct mmc_host *host,
+ struct mmc_request *mrq)
+{
+ struct mmc_command *cmd;
+#if (defined(CONFIG_MIDAS_COMMON) && !defined(CONFIG_EXYNOS4_DEV_DWMCI))
+#ifndef CONFIG_MMC_POLLING_WAIT_CMD23
+ if(mrq->sbc && mrq->sbc->error) {
+ /* if an sbc error exists, do not wait completion.
+ completion is already called.
+ nothing to do at this condition. */
+ } else
+#endif
+#endif
+ wait_for_completion(&mrq->completion);
+
+ cmd = mrq->cmd;
+ if (!cmd->error || !cmd->retries ||
+ mmc_card_removed(host->card))
+ return;
+
+	/* if card is mmc type and nonremovable, and there are errors after
+ issuing r/w command, then init eMMC and mshc */
+ if (((host->card) && mmc_card_mmc(host->card) && \
+ (host->caps & MMC_CAP_NONREMOVABLE)) && \
+ (mrq->cmd->error == -ENOTRECOVERABLE || \
+ ((mrq->cmd->opcode == 17 || mrq->cmd->opcode == 18) && \
+ ((mrq->data->error) || mrq->cmd->error || \
+ (mrq->sbc && mrq->sbc->error))))) {
+ int rt_err = -1,count = 3;
+
+ printk(KERN_ERR "%s: it occurs a critical error on eMMC "
+ "it'll try to recover eMMC to normal state\n",
+ mmc_hostname(host));
+ do {
+ /* these errors mean eMMC gets abnormal state.
+ to recover eMMC to be normal, it has to reset eMMC.
+ first of all, it stops to power to eMMC over 10ms.*/
+ if (host->ops->init_card) {
+ host->ops->init_card(host, host->card);
+ }
+ /* re-init eMMC card */
+ if (host->bus_ops && !host->bus_dead) {
+ /* to init mshc */
+ host->ios.power_mode = MMC_POWER_OFF;
+ host->ios.bus_width = MMC_BUS_WIDTH_1;
+ host->ios.timing = MMC_TIMING_LEGACY;
+ mmc_set_ios(host);
+ mmc_power_up(host);
+
+ /* to init eMMC */
+ if( host->bus_ops->resume )
+ rt_err = host->bus_ops->resume(host);
+ }
+ count--;
+ } while(count && rt_err);
+
+ if (rt_err) {
+ printk(KERN_ERR "%s: it has failed to recover eMMC\n",
+ mmc_hostname(host));
+ } else {
+ printk(KERN_INFO "%s: recovering eMMC has been done\n",
+ mmc_hostname(host));
+ }
+
+ }
+}
+
+/**
+ * mmc_pre_req - Prepare for a new request
+ * @host: MMC host to prepare command
+ * @mrq: MMC request to prepare for
+ * @is_first_req: true if there is no previous started request
+ *	that may run in parallel to this call, otherwise false
+ *
+ * mmc_pre_req() is called in prior to mmc_start_req() to let
+ * host prepare for the new request. Preparation of a request may be
+ * performed while another request is running on the host.
+ */
+static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
+ bool is_first_req)
+{
+ if (host->ops->pre_req) {
+ mmc_host_clk_hold(host);
+ host->ops->pre_req(host, mrq, is_first_req);
+ mmc_host_clk_release(host);
+ }
+}
+
+/**
+ * mmc_post_req - Post process a completed request
+ * @host: MMC host to post process command
+ * @mrq: MMC request to post process for
+ * @err: Error, if non zero, clean up any resources made in pre_req
+ *
+ * Let the host post process a completed request. Post processing of
+ * a request may be performed while another request is running.
+ */
+static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
+ int err)
+{
+ if (host->ops->post_req) {
+ mmc_host_clk_hold(host);
+ host->ops->post_req(host, mrq, err);
+ mmc_host_clk_release(host);
+ }
+}
+
+/**
+ * mmc_start_req - start a non-blocking request
+ * @host: MMC host to start command
+ * @areq: async request to start
+ * @error: out parameter returns 0 for success, otherwise non zero
+ *
+ * Start a new MMC custom command request for a host.
+ * If there is on ongoing async request wait for completion
+ * of that request and start the new one and return.
+ * Does not wait for the new request to complete.
+ *
+ * Returns the completed request, NULL in case of none completed.
+ * Wait for an ongoing request (previously started) to complete and
+ * return the completed request. If there is no ongoing request, NULL
+ * is returned without waiting. NULL is not an error condition.
+ */
+struct mmc_async_req *mmc_start_req(struct mmc_host *host,
+ struct mmc_async_req *areq, int *error)
+{
+ int err = 0;
+ struct mmc_async_req *data = host->areq;
+
+ /* Prepare a new request */
+ if (areq)
+ mmc_pre_req(host, areq->mrq, !host->areq);
+
+ if (host->areq) {
+ mmc_wait_for_req_done(host, host->areq->mrq);
+ err = host->areq->err_check(host->card, host->areq);
+ if (err) {
+ mmc_post_req(host, host->areq->mrq, 0);
+ if (areq)
+ mmc_post_req(host, areq->mrq, -EINVAL);
+
+ host->areq = NULL;
+ goto out;
+ }
+ }
+
+ if (areq)
+ __mmc_start_req(host, areq->mrq);
+
+ if (host->areq)
+ mmc_post_req(host, host->areq->mrq, 0);
+
+ host->areq = areq;
+ out:
+ if (error)
+ *error = err;
+ return data;
+}
+EXPORT_SYMBOL(mmc_start_req);
+
/**
* mmc_wait_for_req - start a request and wait for completion
* @host: MMC host to start command
@@ -212,17 +423,67 @@ static void mmc_wait_done(struct mmc_request *mrq)
*/
void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)
{
- DECLARE_COMPLETION_ONSTACK(complete);
+ __mmc_start_req(host, mrq);
+ mmc_wait_for_req_done(host, mrq);
+}
+EXPORT_SYMBOL(mmc_wait_for_req);
- mrq->done_data = &complete;
- mrq->done = mmc_wait_done;
+/**
+ * mmc_interrupt_hpi - Issue for High priority Interrupt
+ * @card: the MMC card associated with the HPI transfer
+ *
+ * Issued High Priority Interrupt, and check for card status
+ * until out of prg-state.
+ */
+int mmc_interrupt_hpi(struct mmc_card *card)
+{
+ int err;
+ u32 status;
- mmc_start_request(host, mrq);
+ BUG_ON(!card);
- wait_for_completion(&complete);
-}
+ if (!card->ext_csd.hpi_en) {
+ pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host));
+ return 1;
+ }
-EXPORT_SYMBOL(mmc_wait_for_req);
+ mmc_claim_host(card->host);
+ err = mmc_send_status(card, &status);
+ if (err) {
+ pr_err("%s: Get card status fail\n", mmc_hostname(card->host));
+ goto out;
+ }
+
+ /*
+ * If the card status is in PRG-state, we can send the HPI command.
+ */
+ if (R1_CURRENT_STATE(status) == R1_STATE_PRG) {
+ do {
+ /*
+ * We don't know when the HPI command will finish
+ * processing, so we need to resend HPI until out
+ * of prg-state, and keep checking the card status
+ * with SEND_STATUS. If a timeout error occurs when
+ * sending the HPI command, we are already out of
+ * prg-state.
+ */
+ err = mmc_send_hpi_cmd(card, &status);
+ if (err)
+ pr_debug("%s: abort HPI (%d error)\n",
+ mmc_hostname(card->host), err);
+
+ err = mmc_send_status(card, &status);
+ if (err)
+ break;
+ } while (R1_CURRENT_STATE(status) == R1_STATE_PRG);
+ } else
+ pr_debug("%s: Left prg-state\n", mmc_hostname(card->host));
+
+out:
+ mmc_release_host(card->host);
+ return err;
+}
+EXPORT_SYMBOL(mmc_interrupt_hpi);
/**
* mmc_wait_for_cmd - start a command and wait for completion
@@ -390,7 +651,9 @@ int mmc_host_enable(struct mmc_host *host)
int err;
host->en_dis_recurs = 1;
+ mmc_host_clk_hold(host);
err = host->ops->enable(host);
+ mmc_host_clk_release(host);
host->en_dis_recurs = 0;
if (err) {
@@ -410,7 +673,9 @@ static int mmc_host_do_disable(struct mmc_host *host, int lazy)
int err;
host->en_dis_recurs = 1;
+ mmc_host_clk_hold(host);
err = host->ops->disable(host, lazy);
+ mmc_host_clk_release(host);
host->en_dis_recurs = 0;
if (err < 0) {
@@ -973,8 +1238,11 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11
host->ios.signal_voltage = signal_voltage;
- if (host->ops->start_signal_voltage_switch)
+ if (host->ops->start_signal_voltage_switch) {
+ mmc_host_clk_hold(host);
err = host->ops->start_signal_voltage_switch(host, &host->ios);
+ mmc_host_clk_release(host);
+ }
return err;
}
@@ -1001,6 +1269,46 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type)
mmc_host_clk_release(host);
}
+static void mmc_poweroff_notify(struct mmc_host *host)
+{
+ struct mmc_card *card;
+ unsigned int timeout;
+ unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
+ int err = 0;
+
+ card = host->card;
+
+ /*
+ * Send power notify command only if card
+ * is mmc and notify state is powered ON
+ */
+ if (card && mmc_card_mmc(card) &&
+ (card->poweroff_notify_state == MMC_POWERED_ON)) {
+
+ if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
+ notify_type = EXT_CSD_POWER_OFF_SHORT;
+ timeout = card->ext_csd.generic_cmd6_time;
+ card->poweroff_notify_state = MMC_POWEROFF_SHORT;
+ } else {
+ notify_type = EXT_CSD_POWER_OFF_LONG;
+ timeout = card->ext_csd.power_off_longtime;
+ card->poweroff_notify_state = MMC_POWEROFF_LONG;
+ }
+
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_POWER_OFF_NOTIFICATION,
+ notify_type, timeout);
+
+ if (err && err != -EBADMSG)
+ pr_err("Device failed to respond within %d poweroff "
+ "time. Forcefully powering down the device\n",
+ timeout);
+
+ /* Set the card state to no notification after the poweroff */
+ card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
+ }
+}
+
/*
* Apply power to the MMC stack. This is a two-stage process.
* First, we enable power to the card without the clock running.
@@ -1064,6 +1372,8 @@ void mmc_power_off(struct mmc_host *host)
host->ios.clock = 0;
host->ios.vdd = 0;
+ mmc_poweroff_notify(host);
+
/*
* Reset ocr mask to be the highest possible voltage supported for
* this mmc host. This value will be used at next power up.
@@ -1121,6 +1431,36 @@ static inline void mmc_bus_put(struct mmc_host *host)
spin_unlock_irqrestore(&host->lock, flags);
}
+int mmc_resume_bus(struct mmc_host *host)
+{
+ unsigned long flags;
+
+ if (!mmc_bus_needs_resume(host))
+ return -EINVAL;
+
+ printk("%s: Starting deferred resume\n", mmc_hostname(host));
+ spin_lock_irqsave(&host->lock, flags);
+ host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
+ host->rescan_disable = 0;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ mmc_bus_get(host);
+ if (host->bus_ops && !host->bus_dead) {
+ mmc_power_up(host);
+ BUG_ON(!host->bus_ops->resume);
+ host->bus_ops->resume(host);
+ }
+
+ if (host->bus_ops->detect && !host->bus_dead)
+ host->bus_ops->detect(host);
+
+ mmc_bus_put(host);
+ printk("%s: Deferred resume completed\n", mmc_hostname(host));
+ return 0;
+}
+
+EXPORT_SYMBOL(mmc_resume_bus);
+
/*
* Assign a mmc bus handler to a host. Only one bus handler may control a
* host at any given time.
@@ -1186,6 +1526,8 @@ void mmc_detect_change(struct mmc_host *host, unsigned long delay)
spin_unlock_irqrestore(&host->lock, flags);
#endif
+ host->detect_change = 1;
+ wake_lock(&host->detect_wake_lock);
mmc_schedule_delayed_work(&host->detect, delay);
}
@@ -1383,7 +1725,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
if (err) {
printk(KERN_ERR "mmc_erase: group start error %d, "
"status %#x\n", err, cmd.resp[0]);
- err = -EINVAL;
+ err = -EIO;
goto out;
}
@@ -1398,7 +1740,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,
if (err) {
printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n",
err, cmd.resp[0]);
- err = -EINVAL;
+ err = -EIO;
goto out;
}
@@ -1517,10 +1859,34 @@ int mmc_can_trim(struct mmc_card *card)
{
if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN)
return 1;
+ if (mmc_can_discard(card))
+ return 1;
return 0;
}
EXPORT_SYMBOL(mmc_can_trim);
+int mmc_can_discard(struct mmc_card *card)
+{
+	/*
+	 * There is no discard-support bit to probe before eMMC v4.5,
+	 * so fall back to the software feature-support field.
+	 */
+ if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE)
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(mmc_can_discard);
+
+int mmc_can_sanitize(struct mmc_card *card)
+{
+#ifndef CONFIG_MMC_NOT_USE_SANITIZE
+ if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE)
+ return 1;
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(mmc_can_sanitize);
+
int mmc_can_secure_erase_trim(struct mmc_card *card)
{
if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN)
@@ -1540,6 +1906,82 @@ int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,
}
EXPORT_SYMBOL(mmc_erase_group_aligned);
+static unsigned int mmc_do_calc_max_discard(struct mmc_card *card,
+ unsigned int arg)
+{
+ struct mmc_host *host = card->host;
+ unsigned int max_discard, x, y, qty = 0, max_qty, timeout;
+ unsigned int last_timeout = 0;
+
+ if (card->erase_shift)
+ max_qty = UINT_MAX >> card->erase_shift;
+ else if (mmc_card_sd(card))
+ max_qty = UINT_MAX;
+ else
+ max_qty = UINT_MAX / card->erase_size;
+
+ /* Find the largest qty with an OK timeout */
+ do {
+ y = 0;
+ for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) {
+ timeout = mmc_erase_timeout(card, arg, qty + x);
+ if (timeout > host->max_discard_to)
+ break;
+ if (timeout < last_timeout)
+ break;
+ last_timeout = timeout;
+ y = x;
+ }
+ qty += y;
+ } while (y);
+
+ if (!qty)
+ return 0;
+
+ if (qty == 1)
+ return 1;
+
+ /* Convert qty to sectors */
+ if (card->erase_shift)
+ max_discard = --qty << card->erase_shift;
+ else if (mmc_card_sd(card))
+ max_discard = qty;
+ else
+ max_discard = --qty * card->erase_size;
+
+ return max_discard;
+}
+
+unsigned int mmc_calc_max_discard(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ unsigned int max_discard, max_trim;
+
+ if (!host->max_discard_to)
+ return UINT_MAX;
+
+ /*
+ * Without erase_group_def set, MMC erase timeout depends on clock
+	 * frequency, which can change.  In that case, the best choice is
+ * just the preferred erase size.
+ */
+ if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1))
+ return card->pref_erase;
+
+ max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG);
+ if (mmc_can_trim(card)) {
+ max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG);
+ if (max_trim < max_discard)
+ max_discard = max_trim;
+ } else if (max_discard < card->erase_size) {
+ max_discard = 0;
+ }
+ pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n",
+ mmc_hostname(host), max_discard, host->max_discard_to);
+ return max_discard;
+}
+EXPORT_SYMBOL(mmc_calc_max_discard);
+
int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
{
struct mmc_command cmd = {0};
@@ -1554,6 +1996,94 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)
}
EXPORT_SYMBOL(mmc_set_blocklen);
+static void mmc_hw_reset_for_init(struct mmc_host *host)
+{
+ if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
+ return;
+ mmc_host_clk_hold(host);
+ host->ops->hw_reset(host);
+ mmc_host_clk_release(host);
+}
+
+int mmc_can_reset(struct mmc_card *card)
+{
+ u8 rst_n_function;
+
+ if (!mmc_card_mmc(card))
+ return 0;
+ rst_n_function = card->ext_csd.rst_n_function;
+ if ((rst_n_function & EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED)
+ return 0;
+ return 1;
+}
+EXPORT_SYMBOL(mmc_can_reset);
+
+static int mmc_do_hw_reset(struct mmc_host *host, int check)
+{
+ struct mmc_card *card = host->card;
+
+ if (!host->bus_ops->power_restore)
+ return -EOPNOTSUPP;
+
+ if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset)
+ return -EOPNOTSUPP;
+
+ if (!card)
+ return -EINVAL;
+
+ if (!mmc_can_reset(card))
+ return -EOPNOTSUPP;
+
+ mmc_host_clk_hold(host);
+ mmc_set_clock(host, host->f_init);
+
+ host->ops->hw_reset(host);
+
+ /* If the reset has happened, then a status command will fail */
+ if (check) {
+ struct mmc_command cmd = {0};
+ int err;
+
+ cmd.opcode = MMC_SEND_STATUS;
+ if (!mmc_host_is_spi(card->host))
+ cmd.arg = card->rca << 16;
+ cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (!err) {
+ mmc_host_clk_release(host);
+ return -ENOSYS;
+ }
+ }
+
+ host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR);
+ if (mmc_host_is_spi(host)) {
+ host->ios.chip_select = MMC_CS_HIGH;
+ host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;
+ } else {
+ host->ios.chip_select = MMC_CS_DONTCARE;
+ host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;
+ }
+ host->ios.bus_width = MMC_BUS_WIDTH_1;
+ host->ios.timing = MMC_TIMING_LEGACY;
+ mmc_set_ios(host);
+
+ mmc_host_clk_release(host);
+
+ return host->bus_ops->power_restore(host);
+}
+
+int mmc_hw_reset(struct mmc_host *host)
+{
+ return mmc_do_hw_reset(host, 0);
+}
+EXPORT_SYMBOL(mmc_hw_reset);
+
+int mmc_hw_reset_check(struct mmc_host *host)
+{
+ return mmc_do_hw_reset(host, 1);
+}
+EXPORT_SYMBOL(mmc_hw_reset_check);
+
static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
{
host->f_init = freq;
@@ -1565,6 +2095,12 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
mmc_power_up(host);
/*
+ * Some eMMCs (with VCCQ always on) may not be reset after power up, so
+ * do a hardware reset if possible.
+ */
+ mmc_hw_reset_for_init(host);
+
+ /*
* sdio_reset sends CMD52 to reset card. Since we do not know
* if the card is being re-initialized, just send it. CMD52
* should be ignored by SD/eMMC cards.
@@ -1586,12 +2122,50 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
return -EIO;
}
+int _mmc_detect_card_removed(struct mmc_host *host)
+{
+ int ret;
+
+ if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive)
+ return 0;
+
+ if (!host->card || mmc_card_removed(host->card))
+ return 1;
+
+ ret = host->bus_ops->alive(host);
+ if (ret) {
+ mmc_card_set_removed(host->card);
+ pr_debug("%s: card remove detected\n", mmc_hostname(host));
+ }
+
+ return ret;
+}
+
+int mmc_detect_card_removed(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+
+ WARN_ON(!host->claimed);
+ /*
+ * The card will be considered unchanged unless we have been asked to
+ * detect a change or host requires polling to provide card detection.
+ */
+ if (card && !host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
+ return mmc_card_removed(card);
+
+ host->detect_change = 0;
+
+ return _mmc_detect_card_removed(host);
+}
+EXPORT_SYMBOL(mmc_detect_card_removed);
+
void mmc_rescan(struct work_struct *work)
{
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
struct mmc_host *host =
container_of(work, struct mmc_host, detect.work);
int i;
+ bool extend_wakelock = false;
if (host->rescan_disable)
return;
@@ -1606,6 +2180,14 @@ void mmc_rescan(struct work_struct *work)
&& !(host->caps & MMC_CAP_NONREMOVABLE))
host->bus_ops->detect(host);
+	/* If the card was removed, the bus will be marked
+	 * as dead - extend the wakelock so userspace
+	 * has time to respond */
+ if (host->bus_dead)
+ extend_wakelock = 1;
+
+ host->detect_change = 0;
+
/*
* Let mmc_bus_put() free the bus/bus_ops if we've found that
* the card is no longer present.
@@ -1630,16 +2212,24 @@ void mmc_rescan(struct work_struct *work)
mmc_claim_host(host);
for (i = 0; i < ARRAY_SIZE(freqs); i++) {
- if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min)))
+ if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) {
+ extend_wakelock = true;
break;
+ }
if (freqs[i] <= host->f_min)
break;
}
mmc_release_host(host);
out:
- if (host->caps & MMC_CAP_NEEDS_POLL)
+ if (extend_wakelock)
+ wake_lock_timeout(&host->detect_wake_lock, HZ / 2);
+ else
+ wake_unlock(&host->detect_wake_lock);
+ if (host->caps & MMC_CAP_NEEDS_POLL) {
+ wake_lock(&host->detect_wake_lock);
mmc_schedule_delayed_work(&host->detect, HZ);
+ }
}
void mmc_start_host(struct mmc_host *host)
@@ -1659,7 +2249,8 @@ void mmc_stop_host(struct mmc_host *host)
if (host->caps & MMC_CAP_DISABLE)
cancel_delayed_work(&host->disable);
- cancel_delayed_work_sync(&host->detect);
+ if (cancel_delayed_work_sync(&host->detect))
+ wake_unlock(&host->detect_wake_lock);
mmc_flush_scheduled_work();
/* clear pm flags now and let card drivers set them as needed */
@@ -1766,6 +2357,72 @@ int mmc_card_can_sleep(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_card_can_sleep);
+/*
+ * Flush the cache to the non-volatile storage.
+ */
+int mmc_flush_cache(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ int err = 0;
+
+ if (!(host->caps2 & MMC_CAP2_CACHE_CTRL))
+ return err;
+
+ if (mmc_card_mmc(card) &&
+ (card->ext_csd.cache_size > 0) &&
+ (card->ext_csd.cache_ctrl & 1)) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_FLUSH_CACHE, 1, 0);
+ if (err) {
+ pr_err("%s: cache flush error %d\n",
+ mmc_hostname(card->host), err);
+ panic("[TEST] mmc%d, %s returns %d.\n",
+ host->index, __func__, err);
+ }
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_flush_cache);
+
+/*
+ * Turn the cache ON/OFF.
+ * Turning the cache OFF shall trigger flushing of the data
+ * to the non-volatile storage.
+ */
+int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
+{
+ struct mmc_card *card = host->card;
+ unsigned int timeout;
+ int err = 0;
+
+ if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
+ mmc_card_is_removable(host))
+ return err;
+
+ if (card && mmc_card_mmc(card) &&
+ (card->ext_csd.cache_size > 0)) {
+ enable = !!enable;
+
+ if (card->ext_csd.cache_ctrl ^ enable) {
+ timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_CACHE_CTRL, enable, timeout);
+
+ if (err)
+ pr_err("%s: cache %s error %d\n",
+ mmc_hostname(card->host),
+ enable ? "on" : "off",
+ err);
+ else
+ card->ext_csd.cache_ctrl = enable;
+ }
+ }
+
+ return err;
+}
+EXPORT_SYMBOL(mmc_cache_ctrl);
+
#ifdef CONFIG_PM
/**
@@ -1776,28 +2433,89 @@ int mmc_suspend_host(struct mmc_host *host)
{
int err = 0;
+ if (mmc_bus_needs_resume(host))
+ return 0;
+
if (host->caps & MMC_CAP_DISABLE)
cancel_delayed_work(&host->disable);
- cancel_delayed_work(&host->detect);
+ if (cancel_delayed_work(&host->detect))
+ wake_unlock(&host->detect_wake_lock);
mmc_flush_scheduled_work();
+ if (mmc_try_claim_host(host)) {
+ u32 status;
+ u32 count=300000; /* up to 300ms */
+
+ /* if a sdmmc card exists and the card is mmc */
+ if (((host->card) && mmc_card_mmc(host->card))) {
+ int ret;
+ /* flush emmc's cache before getting suspend */
+ ret = mmc_flush_cache(host->card);
+ if (ret)
+ pr_err("%s: there is error %d while "
+ "flushing emmc's cache\n",
+ mmc_hostname(host),ret);
+ }
+ err = mmc_cache_ctrl(host, 0);
+
+		/* make sure the eMMC is idle before suspending: poll the
+		   card status until it leaves the PRG state (state 7) */
+ if (((host->card) && mmc_card_mmc(host->card))) {
+ do {
+ err = mmc_send_status(host->card, &status);
+ if (err)
+ break;
+
+ /* if it is not the first time */
+ if (count != 300000)
+ udelay(1);
+ count--;
+ } while (count && R1_CURRENT_STATE(status) == 7);
+ }
+ mmc_do_release_host(host);
+ } else {
+ err = -EBUSY;
+ }
+
+ if (err)
+ goto out;
mmc_bus_get(host);
if (host->bus_ops && !host->bus_dead) {
- if (host->bus_ops->suspend)
- err = host->bus_ops->suspend(host);
- if (err == -ENOSYS || !host->bus_ops->resume) {
- /*
- * We simply "remove" the card in this case.
- * It will be redetected on resume.
- */
- if (host->bus_ops->remove)
- host->bus_ops->remove(host);
- mmc_claim_host(host);
- mmc_detach_bus(host);
- mmc_power_off(host);
- mmc_release_host(host);
- host->pm_flags = 0;
- err = 0;
+
+ /*
+ * A long response time is not acceptable for device drivers
+		 * when doing suspend.  Prevent mmc_claim_host() in the
+		 * suspend sequence from waiting "forever" by trying to
+		 * pre-claim the host instead.
+ */
+ if (mmc_try_claim_host(host)) {
+ if (host->bus_ops->suspend) {
+ /*
+ * For eMMC 4.5 device send notify command
+ * before sleep, because in sleep state eMMC 4.5
+ * devices respond to only RESET and AWAKE cmd
+ */
+ mmc_poweroff_notify(host);
+ err = host->bus_ops->suspend(host);
+ }
+ if (err == -ENOSYS || !host->bus_ops->resume) {
+ /*
+ * We simply "remove" the card in this case.
+ * It will be redetected on resume.
+ */
+ if (host->bus_ops->remove)
+ host->bus_ops->remove(host);
+ mmc_claim_host(host);
+ mmc_detach_bus(host);
+ mmc_power_off(host);
+ mmc_release_host(host);
+ host->pm_flags = 0;
+ err = 0;
+ }
+
+ mmc_do_release_host(host);
+ } else {
+ err = -EBUSY;
}
}
mmc_bus_put(host);
@@ -1805,6 +2523,7 @@ int mmc_suspend_host(struct mmc_host *host)
if (!err && !mmc_card_keep_power(host))
mmc_power_off(host);
+out:
return err;
}
@@ -1819,6 +2538,12 @@ int mmc_resume_host(struct mmc_host *host)
int err = 0;
mmc_bus_get(host);
+ if (mmc_bus_manual_resume(host)) {
+ host->bus_resume_flags |= MMC_BUSRESUME_NEEDS_RESUME;
+ mmc_bus_put(host);
+ return 0;
+ }
+
if (host->bus_ops && !host->bus_dead) {
if (!mmc_card_keep_power(host)) {
mmc_power_up(host);
@@ -1869,9 +2594,15 @@ int mmc_pm_notify(struct notifier_block *notify_block,
case PM_SUSPEND_PREPARE:
spin_lock_irqsave(&host->lock, flags);
+ if (mmc_bus_needs_resume(host)) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ break;
+ }
host->rescan_disable = 1;
+ host->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
spin_unlock_irqrestore(&host->lock, flags);
- cancel_delayed_work_sync(&host->detect);
+ if (cancel_delayed_work_sync(&host->detect))
+ wake_unlock(&host->detect_wake_lock);
if (!host->bus_ops || host->bus_ops->suspend)
break;
@@ -1882,7 +2613,9 @@ int mmc_pm_notify(struct notifier_block *notify_block,
host->bus_ops->remove(host);
mmc_detach_bus(host);
- mmc_power_off(host);
+ /* for BCM WIFI */
+ if (!(host->pm_flags & MMC_PM_IGNORE_SUSPEND_RESUME))
+ mmc_power_off(host);
mmc_release_host(host);
host->pm_flags = 0;
break;
@@ -1892,9 +2625,16 @@ int mmc_pm_notify(struct notifier_block *notify_block,
case PM_POST_RESTORE:
spin_lock_irqsave(&host->lock, flags);
+ if (mmc_bus_manual_resume(host)) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ break;
+ }
host->rescan_disable = 0;
+ host->power_notify_type = MMC_HOST_PW_NOTIFY_LONG;
spin_unlock_irqrestore(&host->lock, flags);
- mmc_detect_change(host, 0);
+ /* for BCM WIFI */
+ if (!(host->pm_flags & MMC_PM_IGNORE_SUSPEND_RESUME))
+ mmc_detect_change(host, 0);
}
@@ -1902,6 +2642,22 @@ int mmc_pm_notify(struct notifier_block *notify_block,
}
#endif
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+void mmc_set_embedded_sdio_data(struct mmc_host *host,
+ struct sdio_cis *cis,
+ struct sdio_cccr *cccr,
+ struct sdio_embedded_func *funcs,
+ int num_funcs)
+{
+ host->embedded_sdio_data.cis = cis;
+ host->embedded_sdio_data.cccr = cccr;
+ host->embedded_sdio_data.funcs = funcs;
+ host->embedded_sdio_data.num_funcs = num_funcs;
+}
+
+EXPORT_SYMBOL(mmc_set_embedded_sdio_data);
+#endif
+
static int __init mmc_init(void)
{
int ret;
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 14664f1..3400924 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -24,6 +24,7 @@ struct mmc_bus_ops {
int (*resume)(struct mmc_host *);
int (*power_save)(struct mmc_host *);
int (*power_restore)(struct mmc_host *);
+ int (*alive)(struct mmc_host *);
};
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
@@ -59,6 +60,8 @@ void mmc_rescan(struct work_struct *work);
void mmc_start_host(struct mmc_host *host);
void mmc_stop_host(struct mmc_host *host);
+int _mmc_detect_card_removed(struct mmc_host *host);
+
int mmc_attach_mmc(struct mmc_host *host);
int mmc_attach_sd(struct mmc_host *host);
int mmc_attach_sdio(struct mmc_host *host);
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 998797e..b415582 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -113,6 +113,18 @@ static int mmc_ios_show(struct seq_file *s, void *data)
case MMC_TIMING_SD_HS:
str = "sd high-speed";
break;
+ case MMC_TIMING_UHS_SDR50:
+ str = "sd uhs SDR50";
+ break;
+ case MMC_TIMING_UHS_SDR104:
+ str = "sd uhs SDR104";
+ break;
+ case MMC_TIMING_UHS_DDR50:
+ str = "sd uhs DDR50";
+ break;
+ case MMC_TIMING_MMC_HS200:
+ str = "mmc high-speed SDR200";
+ break;
default:
str = "invalid";
break;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 793d0a0..3664c49 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -53,7 +53,27 @@ static DEFINE_IDR(mmc_host_idr);
static DEFINE_SPINLOCK(mmc_host_lock);
#ifdef CONFIG_MMC_CLKGATE
+static ssize_t clkgate_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
+}
+
+static ssize_t clkgate_delay_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ unsigned long flags, value;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+ spin_lock_irqsave(&host->clk_lock, flags);
+ host->clkgate_delay = value;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ return count;
+}
/*
* Enabling clock gating will make the core call out to the host
* once up and once down when it performs a request or card operation
@@ -87,8 +107,11 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
*/
if (!host->clk_requests) {
spin_unlock_irqrestore(&host->clk_lock, flags);
- tick_ns = DIV_ROUND_UP(1000000000, freq);
- ndelay(host->clk_delay * tick_ns);
+ /* wait only when clk_gate_delay is 0 */
+ if (!host->clkgate_delay) {
+ tick_ns = DIV_ROUND_UP(1000000000, freq);
+ ndelay(host->clk_delay * tick_ns);
+ }
} else {
/* New users appeared while waiting for this work */
spin_unlock_irqrestore(&host->clk_lock, flags);
@@ -113,7 +136,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
static void mmc_host_clk_gate_work(struct work_struct *work)
{
struct mmc_host *host = container_of(work, struct mmc_host,
- clk_gate_work);
+ clk_gate_work.work);
mmc_host_clk_gate_delayed(host);
}
@@ -130,6 +153,8 @@ void mmc_host_clk_hold(struct mmc_host *host)
{
unsigned long flags;
+ /* cancel any clock gating work scheduled by mmc_host_clk_release() */
+ cancel_delayed_work_sync(&host->clk_gate_work);
mutex_lock(&host->clk_gate_mutex);
spin_lock_irqsave(&host->clk_lock, flags);
if (host->clk_gated) {
@@ -179,7 +204,9 @@ void mmc_host_clk_release(struct mmc_host *host)
host->clk_requests--;
if (mmc_host_may_gate_card(host->card) &&
!host->clk_requests)
- queue_work(system_nrt_wq, &host->clk_gate_work);
+ queue_delayed_work(system_nrt_wq, &host->clk_gate_work,
+ msecs_to_jiffies(host->clkgate_delay));
+
spin_unlock_irqrestore(&host->clk_lock, flags);
}
@@ -212,8 +239,13 @@ static inline void mmc_host_clk_init(struct mmc_host *host)
host->clk_requests = 0;
/* Hold MCI clock for 8 cycles by default */
host->clk_delay = 8;
+	/*
+	 * Default clock gating delay is 3 ms.  This value can be
+	 * tuned at runtime via the clkgate_delay sysfs attribute.
+	 */
+ host->clkgate_delay = 3;
host->clk_gated = false;
- INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
+ INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
spin_lock_init(&host->clk_lock);
mutex_init(&host->clk_gate_mutex);
}
@@ -228,7 +260,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
* Wait for any outstanding gate and then make sure we're
* ungated before exiting.
*/
- if (cancel_work_sync(&host->clk_gate_work))
+ if (cancel_delayed_work_sync(&host->clk_gate_work))
mmc_host_clk_gate_delayed(host);
if (host->clk_gated)
mmc_host_clk_hold(host);
@@ -236,6 +268,18 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
WARN_ON(host->clk_requests > 1);
}
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+ host->clkgate_delay_attr.show = clkgate_delay_show;
+ host->clkgate_delay_attr.store = clkgate_delay_store;
+ sysfs_attr_init(&host->clkgate_delay_attr.attr);
+ host->clkgate_delay_attr.attr.name = "clkgate_delay";
+ host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
+ pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
+ mmc_hostname(host));
+}
+
#else
static inline void mmc_host_clk_init(struct mmc_host *host)
@@ -246,6 +290,9 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
{
}
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+}
#endif
/**
@@ -284,6 +331,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
+ wake_lock_init(&host->detect_wake_lock, WAKE_LOCK_SUSPEND,
+ kasprintf(GFP_KERNEL, "%s_detect", mmc_hostname(host)));
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
#ifdef CONFIG_PM
@@ -335,8 +384,11 @@ int mmc_add_host(struct mmc_host *host)
mmc_add_host_debugfs(host);
#endif
+ mmc_host_clk_sysfs_init(host);
+
mmc_start_host(host);
- register_pm_notifier(&host->pm_notify);
+ if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
+ register_pm_notifier(&host->pm_notify);
return 0;
}
@@ -353,7 +405,9 @@ EXPORT_SYMBOL(mmc_add_host);
*/
void mmc_remove_host(struct mmc_host *host)
{
- unregister_pm_notifier(&host->pm_notify);
+ if (!(host->pm_flags & MMC_PM_IGNORE_PM_NOTIFY))
+ unregister_pm_notifier(&host->pm_notify);
+
mmc_stop_host(host);
#ifdef CONFIG_DEBUG_FS
@@ -380,6 +434,7 @@ void mmc_free_host(struct mmc_host *host)
spin_lock(&mmc_host_lock);
idr_remove(&mmc_host_idr, host->index);
spin_unlock(&mmc_host_lock);
+ wake_lock_destroy(&host->detect_wake_lock);
put_device(&host->class_dev);
}
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index fb8a5cd..08a7852 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -14,27 +14,6 @@
int mmc_register_host_class(void);
void mmc_unregister_host_class(void);
-
-#ifdef CONFIG_MMC_CLKGATE
-void mmc_host_clk_hold(struct mmc_host *host);
-void mmc_host_clk_release(struct mmc_host *host);
-unsigned int mmc_host_clk_rate(struct mmc_host *host);
-
-#else
-static inline void mmc_host_clk_hold(struct mmc_host *host)
-{
-}
-
-static inline void mmc_host_clk_release(struct mmc_host *host)
-{
-}
-
-static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
-{
- return host->ios.clock;
-}
-#endif
-
void mmc_host_deeper_disable(struct work_struct *work);
#endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 20b42c8..d118f3b 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -22,6 +22,16 @@
#include "mmc_ops.h"
#include "sd_ops.h"
+#if defined(CONFIG_MIDAS_COMMON)
+#if defined(CONFIG_TARGET_LOCALE_KOR)
+/* For the check ext_csd register in KOR model */
+#define MMC_RETRY_READ_EXT_CSD
+#else
+/* For debugging about ext_csd register value */
+#define MMC_CHECK_EXT_CSD
+#endif
+#endif
+
static const unsigned int tran_exp[] = {
10000, 100000, 1000000, 10000000,
0, 0, 0, 0
@@ -41,7 +51,7 @@ static const unsigned int tacc_mant[] = {
35, 40, 45, 50, 55, 60, 70, 80,
};
-#define UNSTUFF_BITS(resp,start,size) \
+#define UNSTUFF_BITS(resp, start, size) \
({ \
const int __size = size; \
const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1; \
@@ -170,9 +180,88 @@ static int mmc_decode_csd(struct mmc_card *card)
csd->erase_size <<= csd->write_blkbits - 9;
}
+ if (UNSTUFF_BITS(resp, 13, 1))
+ printk(KERN_ERR "%s: PERM_WRITE_PROTECT was set.\n",
+ mmc_hostname(card->host));
+
return 0;
}
+#if defined(MMC_CHECK_EXT_CSD)
+/* For debugging about ext_csd register value */
+static u8 *ext_csd_backup;
+static void mmc_error_ext_csd(struct mmc_card *card, u8 *ext_csd,
+ int backup, unsigned int slice)
+{
+ int i = 0;
+ int err = 0;
+ unsigned int available_new = 0;
+ u8 *ext_csd_new;
+
+ if (backup) {
+ kfree(ext_csd_backup);
+
+ ext_csd_backup = kmalloc(512, GFP_KERNEL);
+ if (!ext_csd_backup) {
+ pr_err("%s: kmalloc is failed(512B).\n", __func__);
+ return;
+ }
+
+ memcpy(ext_csd_backup, ext_csd, 512);
+#if 0 /* Just checking */
+#define EXT_CSD_REV 192 /* RO */
+#define EXT_CSD_STRUCTURE 194 /* RO */
+#define EXT_CSD_CARD_TYPE 196 /* RO */
+#endif
+ pr_err("[TEST] eMMC check : %d, %d, %d.\n",
+ ext_csd_backup[EXT_CSD_REV],
+ ext_csd_backup[EXT_CSD_STRUCTURE],
+ ext_csd_backup[EXT_CSD_CARD_TYPE]);
+ } else {
+ ext_csd_new = kmalloc(512, GFP_KERNEL);
+ if (!ext_csd_new) {
+ pr_err("%s: ext_csd_new kmalloc is failed(512B).\n",
+ __func__);
+ } else {
+ err = mmc_send_ext_csd(card, ext_csd_new);
+ if (err)
+ pr_err("[TEST] Fail to get new EXT_CSD.\n");
+ else
+ available_new = 1;
+ }
+ pr_err("[TEST] %s: starting diff ext_csd.\n", __func__);
+ pr_err("[TEST] %s: error on slice %d: backup=%d, now=%d,"
+ "new=%d.\n",
+ __func__, slice,
+ ext_csd_backup[slice], ext_csd[slice],
+ available_new ? ext_csd_new[slice] : 0);
+ for (i = 0 ; i < 512 ; i++) {
+ if (available_new) {
+ if (ext_csd_backup[i] != ext_csd[i] ||
+ ext_csd_new[i] != ext_csd[i])
+ pr_err("%d : ext_csd_backup=%d,"
+ "ext_csd=%d,"
+ "ext_csd_new=%d.\n",
+ i,
+ ext_csd_backup[i],
+ ext_csd[i],
+ ext_csd_new[i]);
+ } else {
+ if (ext_csd_backup[i] != ext_csd[i])
+ pr_err("%d : ext_csd_backup=%d,"
+ "ext_csd=%d.\n",
+ i,
+ ext_csd_backup[i],
+ ext_csd[i]);
+ }
+ }
+ panic("eMMC's EXT_CSD error.\n");
+ }
+ return;
+
+}
+#endif
+
/*
* Read extended CSD.
*/
@@ -231,6 +320,11 @@ static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
} else
*new_ext_csd = ext_csd;
+#if defined(MMC_CHECK_EXT_CSD)
+/* For debugging about ext_csd register value */
+ mmc_error_ext_csd(card, ext_csd, 1, 0);
+#endif
+
return err;
}
@@ -254,15 +348,23 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
"version %d\n", mmc_hostname(card->host),
card->ext_csd.raw_ext_csd_structure);
err = -EINVAL;
+#if defined(MMC_CHECK_EXT_CSD)
+ /* For debugging about ext_csd register value */
+ mmc_error_ext_csd(card, ext_csd, 0, EXT_CSD_STRUCTURE);
+#endif
goto out;
}
}
card->ext_csd.rev = ext_csd[EXT_CSD_REV];
- if (card->ext_csd.rev > 5) {
+ if (card->ext_csd.rev > 6) {
printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n",
mmc_hostname(card->host), card->ext_csd.rev);
err = -EINVAL;
+#if defined(MMC_CHECK_EXT_CSD)
+ /* For debugging about ext_csd register value */
+ mmc_error_ext_csd(card, ext_csd, 0, EXT_CSD_REV);
+#endif
goto out;
}
@@ -283,6 +385,27 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
}
card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
+ case EXT_CSD_CARD_TYPE_SDR_ALL:
+ case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V:
+ case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V:
+ case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52:
+ card->ext_csd.hs_max_dtr = 200000000;
+ card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_200;
+ break;
+ case EXT_CSD_CARD_TYPE_SDR_1_2V_ALL:
+ case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V:
+ case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V:
+ case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52:
+ card->ext_csd.hs_max_dtr = 200000000;
+ card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_2V;
+ break;
+ case EXT_CSD_CARD_TYPE_SDR_1_8V_ALL:
+ case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V:
+ case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V:
+ case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52:
+ card->ext_csd.hs_max_dtr = 200000000;
+ card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_8V;
+ break;
case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
EXT_CSD_CARD_TYPE_26:
card->ext_csd.hs_max_dtr = 52000000;
@@ -305,10 +428,18 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
card->ext_csd.hs_max_dtr = 26000000;
break;
default:
+#if defined(MMC_CHECK_EXT_CSD)
+ /* For debugging about ext_csd register value */
+ mmc_error_ext_csd(card, ext_csd, 0, EXT_CSD_CARD_TYPE);
+#endif
/* MMC v4 spec says this cannot happen */
printk(KERN_WARNING "%s: card is mmc v4 but doesn't "
"support any high-speed modes.\n",
mmc_hostname(card->host));
+#if defined(MMC_RETRY_READ_EXT_CSD)
+ err = -EINVAL;
+ goto out;
+#endif
}
card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
@@ -319,6 +450,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
if (card->ext_csd.rev >= 3) {
u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];
+ card->ext_csd.boot_part_prot = ext_csd[EXT_CSD_BOOT_CONFIG_PROT];
/* EXT_CSD value is in units of 10ms, but we store in ms */
card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];
@@ -403,8 +535,28 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
ext_csd[EXT_CSD_TRIM_MULT];
}
- if (card->ext_csd.rev >= 5)
+ if (card->ext_csd.rev >= 5) {
+ /* enable discard feature if emmc is 4.41 */
+ card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
+
+ /* check whether the eMMC card supports HPI */
+ if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
+ card->ext_csd.hpi = 1;
+ if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
+ card->ext_csd.hpi_cmd = MMC_STOP_TRANSMISSION;
+ else
+ card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
+ /*
+ * Indicate the maximum timeout to close
+ * a command interrupted by HPI
+ */
+ card->ext_csd.out_of_int_time =
+ ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
+ }
+
card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
+ card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];
+ }
card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
@@ -412,6 +564,27 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
else
card->erased_byte = 0x0;
+ /* eMMC v4.5 or later */
+ if (card->ext_csd.rev >= 6) {
+ card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;
+
+ card->ext_csd.generic_cmd6_time = 10 *
+ ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
+ card->ext_csd.power_off_longtime = 10 *
+ ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];
+
+ card->ext_csd.cache_size =
+ ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
+ ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
+ ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
+ ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;
+
+ card->ext_csd.max_packed_writes =
+ ext_csd[EXT_CSD_MAX_PACKED_WRITES];
+ card->ext_csd.max_packed_reads =
+ ext_csd[EXT_CSD_MAX_PACKED_READS];
+ }
+
out:
return err;
}
@@ -438,9 +611,6 @@ static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
goto out;
}
- if (bus_width == MMC_BUS_WIDTH_1)
- goto out;
-
/* only compare read only fields */
err = (!(card->ext_csd.raw_partition_support ==
bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
@@ -532,6 +702,159 @@ static struct device_type mmc_type = {
};
/*
+ * Select the PowerClass for the current bus width
+ * If power class is defined for 4/8 bit bus in the
+ * extended CSD register, select it by executing the
+ * mmc_switch command.
+ */
+static int mmc_select_powerclass(struct mmc_card *card,
+ unsigned int bus_width, u8 *ext_csd)
+{
+ int err = 0;
+ unsigned int pwrclass_val;
+ unsigned int index = 0;
+ struct mmc_host *host;
+
+ BUG_ON(!card);
+
+ host = card->host;
+ BUG_ON(!host);
+
+ if (ext_csd == NULL)
+ return 0;
+
+ /* Power class selection is supported for versions >= 4.0 */
+ if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
+ return 0;
+
+ /* Power class values are defined only for 4/8 bit bus */
+ if (bus_width == EXT_CSD_BUS_WIDTH_1)
+ return 0;
+
+ switch (1 << host->ios.vdd) {
+ case MMC_VDD_165_195:
+ if (host->ios.clock <= 26000000)
+ index = EXT_CSD_PWR_CL_26_195;
+ else if (host->ios.clock <= 52000000)
+ index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
+ EXT_CSD_PWR_CL_52_195 :
+ EXT_CSD_PWR_CL_DDR_52_195;
+ else if (host->ios.clock <= 200000000)
+ index = EXT_CSD_PWR_CL_200_195;
+ break;
+ case MMC_VDD_32_33:
+ case MMC_VDD_33_34:
+ case MMC_VDD_34_35:
+ case MMC_VDD_35_36:
+ if (host->ios.clock <= 26000000)
+ index = EXT_CSD_PWR_CL_26_360;
+ else if (host->ios.clock <= 52000000)
+ index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
+ EXT_CSD_PWR_CL_52_360 :
+ EXT_CSD_PWR_CL_DDR_52_360;
+ else if (host->ios.clock <= 200000000)
+ index = EXT_CSD_PWR_CL_200_360;
+ break;
+ default:
+ pr_warning("%s: Voltage range not supported "
+ "for power class.\n", mmc_hostname(host));
+ return -EINVAL;
+ }
+
+ pwrclass_val = ext_csd[index];
+
+ if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
+ pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
+ EXT_CSD_PWR_CL_8BIT_SHIFT;
+ else
+ pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
+ EXT_CSD_PWR_CL_4BIT_SHIFT;
+
+ /* If the power class is different from the default value */
+ if (pwrclass_val > 0) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_POWER_CLASS,
+ pwrclass_val,
+ card->ext_csd.generic_cmd6_time);
+ }
+
+ return err;
+}
+
+/*
+ * Selects the desired buswidth and switch to the HS200 mode
+ * if bus width set without error
+ */
+static int mmc_select_hs200(struct mmc_card *card)
+{
+ int idx, err = 0;
+ struct mmc_host *host;
+ static unsigned ext_csd_bits[] = {
+ EXT_CSD_BUS_WIDTH_4,
+ EXT_CSD_BUS_WIDTH_8,
+ };
+ static unsigned bus_widths[] = {
+ MMC_BUS_WIDTH_4,
+ MMC_BUS_WIDTH_8,
+ };
+
+ BUG_ON(!card);
+
+ host = card->host;
+
+ if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
+ host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
+ if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0))
+ err = mmc_set_signal_voltage(host,
+ MMC_SIGNAL_VOLTAGE_180, 0);
+
+ /* If fails try again during next card power cycle */
+ if (err)
+ goto err;
+
+ idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 1 : 0;
+
+ /*
+ * Unlike SD, MMC cards dont have a configuration register to notify
+ * supported bus width. So bus test command should be run to identify
+ * the supported bus width or compare the ext csd values of current
+ * bus width and ext csd values of 1 bit mode read earlier.
+ */
+ for (; idx >= 0; idx--) {
+
+ /*
+ * Host is capable of 8bit transfer, then switch
+ * the device to work in 8bit transfer mode. If the
+ * mmc switch command returns error then switch to
+ * 4bit transfer mode. On success set the corresponding
+ * bus width on the host.
+ */
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ ext_csd_bits[idx],
+ card->ext_csd.generic_cmd6_time);
+ if (err)
+ continue;
+
+ mmc_set_bus_width(card->host, bus_widths[idx]);
+
+ if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
+ err = mmc_compare_ext_csds(card, bus_widths[idx]);
+ else
+ err = mmc_bus_test(card, bus_widths[idx]);
+ if (!err)
+ break;
+ }
+
+ /* switch to HS200 mode if bus width set successfully */
+ if (!err)
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, 2, 0);
+err:
+ return err;
+}
+
+/*
* Handle the detection and initialisation of a card.
*
* In the case of a resume, "oldcard" will contain the card
@@ -644,14 +967,28 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
/*
* Fetch and process extended CSD.
*/
-
+#if defined(MMC_RETRY_READ_EXT_CSD)
+ {
+ int i = 0;
+ for (i = 0 ; i < 3 ; i++) {
+ err = mmc_get_ext_csd(card, &ext_csd);
+ if (err)
+ continue;
+ err = mmc_read_ext_csd(card, ext_csd);
+ if (!err)
+ break;
+ }
+ if (err)
+ goto free_card;
+ }
+#else
err = mmc_get_ext_csd(card, &ext_csd);
if (err)
goto free_card;
err = mmc_read_ext_csd(card, ext_csd);
if (err)
goto free_card;
-
+#endif
/* If doing byte addressing, check if required to do sector
* addressing. Handle the case of <2GB cards needing sector
* addressing. See section 8.1 JEDEC Standard JED84-A441;
@@ -670,7 +1007,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
*/
if (card->ext_csd.enhanced_area_en) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_ERASE_GROUP_DEF, 1, 0);
+ EXT_CSD_ERASE_GROUP_DEF, 1,
+ card->ext_csd.generic_cmd6_time);
if (err && err != -EBADMSG)
goto free_card;
@@ -708,12 +1046,53 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
/*
- * Activate high speed (if supported)
+ * Ensure eMMC boot config is protected.
*/
- if ((card->ext_csd.hs_max_dtr != 0) &&
- (host->caps & MMC_CAP_MMC_HIGHSPEED)) {
+ if (!(card->ext_csd.boot_part_prot & (0x1<<4)) &&
+ !(card->ext_csd.boot_part_prot & (0x1<<0))) {
+ card->ext_csd.boot_part_prot |= (0x1<<0);
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_HS_TIMING, 1, 0);
+ EXT_CSD_BOOT_CONFIG_PROT,
+ card->ext_csd.boot_part_prot,
+ card->ext_csd.part_time);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ }
+
+ /*
+ * If the host supports the power_off_notify capability then
+ * set the notification byte in the ext_csd register of device
+ */
+ if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
+ (card->ext_csd.rev >= 6)) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_POWER_OFF_NOTIFICATION,
+ EXT_CSD_POWER_ON,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ }
+
+ if (!err && (host->caps2 & MMC_CAP2_POWEROFF_NOTIFY))
+ /*
+ * The err can be -EBADMSG or 0,
+ * so check for success and update the flag
+ */
+ if (!err)
+ card->poweroff_notify_state = MMC_POWERED_ON;
+
+ /*
+ * Activate high speed (if supported)
+ */
+ if (card->ext_csd.hs_max_dtr != 0) {
+ err = 0;
+ if (card->ext_csd.hs_max_dtr > 52000000 &&
+ host->caps2 & MMC_CAP2_HS200)
+ err = mmc_select_hs200(card);
+ else if (host->caps & MMC_CAP_MMC_HIGHSPEED)
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, 1, 0);
+
if (err && err != -EBADMSG)
goto free_card;
@@ -722,8 +1101,15 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
mmc_hostname(card->host));
err = 0;
} else {
- mmc_card_set_highspeed(card);
- mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+ if (card->ext_csd.hs_max_dtr > 52000000 &&
+ host->caps2 & MMC_CAP2_HS200) {
+ mmc_card_set_hs200(card);
+ mmc_set_timing(card->host,
+ MMC_TIMING_MMC_HS200);
+ } else {
+ mmc_card_set_highspeed(card);
+ mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+ }
}
}
@@ -732,7 +1118,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
*/
max_dtr = (unsigned int)-1;
- if (mmc_card_highspeed(card)) {
+ if (mmc_card_highspeed(card) || mmc_card_hs200(card)) {
if (max_dtr > card->ext_csd.hs_max_dtr)
max_dtr = card->ext_csd.hs_max_dtr;
} else if (max_dtr > card->csd.max_dtr) {
@@ -758,9 +1144,51 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
/*
+ * Indicate HS200 SDR mode (if supported).
+ */
+ if (mmc_card_hs200(card)) {
+ u32 ext_csd_bits;
+ u32 bus_width = card->host->ios.bus_width;
+
+ /*
+ * For devices supporting HS200 mode, the bus width has
+ * to be set before executing the tuning function. If
+ * set before tuning, then device will respond with CRC
+ * errors for responses on CMD line. So for HS200 the
+ * sequence will be
+ * 1. set bus width 4bit / 8 bit (1 bit not supported)
+ * 2. switch to HS200 mode
+ * 3. set the clock to > 52Mhz <=200MHz and
+ * 4. execute tuning for HS200
+ */
+ if ((host->caps2 & MMC_CAP2_HS200) &&
+ card->host->ops->execute_tuning) {
+ mmc_host_clk_hold(card->host);
+ err = card->host->ops->execute_tuning(card->host,
+ MMC_SEND_TUNING_BLOCK_HS200);
+ mmc_host_clk_release(card->host);
+ }
+ if (err) {
+ pr_warning("%s: tuning execution failed\n",
+ mmc_hostname(card->host));
+ goto err;
+ }
+
+ ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
+ EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
+ err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
+ if (err) {
+ pr_err("%s: power class selection to bus width %d failed\n",
+ mmc_hostname(card->host), 1 << bus_width);
+ goto err;
+ }
+ }
+
+ /*
* Activate wide bus and DDR (if supported).
*/
- if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
+ if (!mmc_card_hs200(card) &&
+ (card->csd.mmca_vsn >= CSD_SPEC_VER_3) &&
(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
static unsigned ext_csd_bits[][2] = {
{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
@@ -782,10 +1210,18 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
bus_width = bus_widths[idx];
if (bus_width == MMC_BUS_WIDTH_1)
ddr = 0; /* no DDR for 1-bit width */
+ err = mmc_select_powerclass(card, ext_csd_bits[idx][0],
+ ext_csd);
+ if (err)
+ pr_err("%s: power class selection to "
+ "bus width %d failed\n",
+ mmc_hostname(card->host),
+ 1 << bus_width);
+
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
ext_csd_bits[idx][0],
- 0);
+ card->ext_csd.generic_cmd6_time);
if (!err) {
mmc_set_bus_width(card->host, bus_width);
@@ -805,10 +1241,21 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
if (!err && ddr) {
+ /* to inform to mshci driver
+ that it is working as DDR mode */
+ (host->ios).ddr = (unsigned char)ddr;
+ err = mmc_select_powerclass(card, ext_csd_bits[idx][1],
+ ext_csd);
+ if (err)
+ pr_err("%s: power class selection to "
+ "bus width %d ddr %d failed\n",
+ mmc_hostname(card->host),
+ 1 << bus_width, ddr);
+
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_BUS_WIDTH,
ext_csd_bits[idx][1],
- 0);
+ card->ext_csd.generic_cmd6_time);
}
if (err) {
printk(KERN_WARNING "%s: switch to bus width %d ddr %d "
@@ -842,6 +1289,68 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
}
+ /*
+ * Enable HPI feature (if supported)
+ */
+ if (card->ext_csd.hpi) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HPI_MGMT, 1,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ if (err) {
+ pr_warning("%s: Enabling HPI failed\n",
+ mmc_hostname(card->host));
+ err = 0;
+ } else
+ card->ext_csd.hpi_en = 1;
+ }
+
+ /*
+ * If cache size is higher than 0, this indicates
+ * the existence of cache and it can be turned on.
+ */
+ if ((host->caps2 & MMC_CAP2_CACHE_CTRL) &&
+ card->ext_csd.cache_size > 0) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_CACHE_CTRL, 1,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG)
+ goto free_card;
+
+ /*
+ * Only if no error, cache is turned on successfully.
+ */
+ if (err) {
+ pr_warning("%s: Cache is supported, "
+ "but failed to turn on (%d)\n",
+ mmc_hostname(card->host), err);
+ card->ext_csd.cache_ctrl = 0;
+ err = 0;
+ } else {
+ card->ext_csd.cache_ctrl = 1;
+ }
+ }
+
+ if ((host->caps2 & MMC_CAP2_PACKED_CMD) &&
+ (card->ext_csd.max_packed_writes > 0) &&
+ (card->ext_csd.max_packed_reads > 0)) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_EXP_EVENTS_CTRL,
+ EXT_CSD_PACKED_EVENT_EN,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ if (err) {
+ pr_warning("%s: Enabling packed event failed\n",
+ mmc_hostname(card->host));
+ card->ext_csd.packed_event_en = 0;
+ err = 0;
+ } else {
+ card->ext_csd.packed_event_en = 1;
+ }
+ }
+
if (!oldcard)
host->card = card;
@@ -870,6 +1379,14 @@ static void mmc_remove(struct mmc_host *host)
}
/*
+ * Card detection - card is alive.
+ */
+static int mmc_alive(struct mmc_host *host)
+{
+ return mmc_send_status(host->card, NULL);
+}
+
+/*
* Card detection callback from host.
*/
static void mmc_detect(struct mmc_host *host)
@@ -884,7 +1401,7 @@ static void mmc_detect(struct mmc_host *host)
/*
* Just check if our card has been removed.
*/
- err = mmc_send_status(host->card, NULL);
+ err = _mmc_detect_card_removed(host);
mmc_release_host(host);
@@ -985,6 +1502,7 @@ static const struct mmc_bus_ops mmc_ops = {
.suspend = NULL,
.resume = NULL,
.power_restore = mmc_power_restore,
+ .alive = mmc_alive,
};
static const struct mmc_bus_ops mmc_ops_unsafe = {
@@ -995,6 +1513,7 @@ static const struct mmc_bus_ops mmc_ops_unsafe = {
.suspend = mmc_suspend,
.resume = mmc_resume,
.power_restore = mmc_power_restore,
+ .alive = mmc_alive,
};
static void mmc_attach_bus_ops(struct mmc_host *host)
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 845ce7c..15b64318 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -17,6 +17,8 @@
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
+#include <plat/cpu.h>
+
#include "core.h"
#include "mmc_ops.h"
@@ -334,6 +336,7 @@ int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)
return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,
ext_csd, 512);
}
+EXPORT_SYMBOL_GPL(mmc_send_ext_csd);
int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)
{
@@ -400,6 +403,10 @@ int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
/* Must check status to be sure of no errors */
do {
+#if defined(CONFIG_MACH_SMDKC210) || defined(CONFIG_MACH_SMDKV310)
+ /* HACK: in case of smdkc210, smdkv310 has problem at inand */
+ mmc_delay(3);
+#endif
err = mmc_send_status(card, &status);
if (err)
return err;
@@ -547,3 +554,38 @@ int mmc_bus_test(struct mmc_card *card, u8 bus_width)
err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width);
return err;
}
+
+int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
+{
+ struct mmc_command cmd = {0};
+ unsigned int opcode;
+ int err;
+
+ if (!card->ext_csd.hpi) {
+ pr_warning("%s: Card didn't support HPI command\n",
+ mmc_hostname(card->host));
+ return -EINVAL;
+ }
+
+ opcode = card->ext_csd.hpi_cmd;
+ if (opcode == MMC_STOP_TRANSMISSION)
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
+ else if (opcode == MMC_SEND_STATUS)
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ cmd.opcode = opcode;
+ cmd.arg = card->rca << 16 | 1;
+ cmd.cmd_timeout_ms = card->ext_csd.out_of_int_time;
+
+ err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ if (err) {
+ pr_warn("%s: error %d interrupting operation. "
+ "HPI command response %#x\n", mmc_hostname(card->host),
+ err, cmd.resp[0]);
+ return err;
+ }
+ if (status)
+ *status = cmd.resp[0];
+
+ return 0;
+}
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 9276946..3dd8941 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -26,6 +26,7 @@ int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
int mmc_card_sleepawake(struct mmc_host *host, int sleep);
int mmc_bus_test(struct mmc_card *card, u8 bus_width);
+int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);
#endif
diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c
index 3a59621..aca1c3c 100644
--- a/drivers/mmc/core/quirks.c
+++ b/drivers/mmc/core/quirks.c
@@ -21,6 +21,22 @@
#define SDIO_DEVICE_ID_TI_WL1271 0x4076
#endif
+#ifndef SDIO_VENDOR_ID_BRCM
+#define SDIO_VENDOR_ID_BRCM 0x02D0
+#endif
+
+#ifndef SDIO_DEVICE_ID_BRCM_BCM4330
+#define SDIO_DEVICE_ID_BRCM_BCM4330 0x4330
+#endif
+
+#ifndef SDIO_DEVICE_ID_BRCM_BCM4334
+#define SDIO_DEVICE_ID_BRCM_BCM4334 0x4334
+#endif
+
+#ifndef SDIO_DEVICE_ID_BRCM_BCM43241
+#define SDIO_DEVICE_ID_BRCM_BCM43241 0x4324
+#endif
+
/*
* This hook just adds a quirk for all sdio devices
*/
@@ -46,6 +62,15 @@ static const struct mmc_fixup mmc_fixup_methods[] = {
SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271,
add_quirk, MMC_QUIRK_DISABLE_CD),
+ SDIO_FIXUP(SDIO_VENDOR_ID_BRCM, SDIO_DEVICE_ID_BRCM_BCM4330,
+ remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_BRCM, SDIO_DEVICE_ID_BRCM_BCM4334,
+ remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
+ SDIO_FIXUP(SDIO_VENDOR_ID_BRCM, SDIO_DEVICE_ID_BRCM_BCM43241,
+ remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING),
+
END_FIXUP
};
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index bd8805c..4586eaa 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -306,6 +306,9 @@ static int mmc_read_switch(struct mmc_card *card)
goto out;
}
+ if (status[13] & 0x02)
+ card->sw_caps.hs_max_dtr = 50000000;
+
if (card->scr.sda_spec3) {
card->sw_caps.sd3_bus_mode = status[13];
@@ -348,9 +351,6 @@ static int mmc_read_switch(struct mmc_card *card)
}
card->sw_caps.sd3_curr_limit = status[7];
- } else {
- if (status[13] & 0x02)
- card->sw_caps.hs_max_dtr = 50000000;
}
out:
@@ -409,52 +409,64 @@ out:
static int sd_select_driver_type(struct mmc_card *card, u8 *status)
{
- int host_drv_type = 0, card_drv_type = 0;
+ int host_drv_type = SD_DRIVER_TYPE_B;
+ int card_drv_type = SD_DRIVER_TYPE_B;
+ int drive_strength;
int err;
/*
* If the host doesn't support any of the Driver Types A,C or D,
- * default Driver Type B is used.
+ * or there is no board specific handler then default Driver
+ * Type B is used.
*/
if (!(card->host->caps & (MMC_CAP_DRIVER_TYPE_A | MMC_CAP_DRIVER_TYPE_C
| MMC_CAP_DRIVER_TYPE_D)))
return 0;
- if (card->host->caps & MMC_CAP_DRIVER_TYPE_A) {
- host_drv_type = MMC_SET_DRIVER_TYPE_A;
- if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
- card_drv_type = MMC_SET_DRIVER_TYPE_A;
- else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B)
- card_drv_type = MMC_SET_DRIVER_TYPE_B;
- else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
- card_drv_type = MMC_SET_DRIVER_TYPE_C;
- } else if (card->host->caps & MMC_CAP_DRIVER_TYPE_C) {
- host_drv_type = MMC_SET_DRIVER_TYPE_C;
- if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
- card_drv_type = MMC_SET_DRIVER_TYPE_C;
- } else if (!(card->host->caps & MMC_CAP_DRIVER_TYPE_D)) {
- /*
- * If we are here, that means only the default driver type
- * B is supported by the host.
- */
- host_drv_type = MMC_SET_DRIVER_TYPE_B;
- if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_B)
- card_drv_type = MMC_SET_DRIVER_TYPE_B;
- else if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
- card_drv_type = MMC_SET_DRIVER_TYPE_C;
- }
+ if (!card->host->ops->select_drive_strength)
+ return 0;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_A)
+ host_drv_type |= SD_DRIVER_TYPE_A;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_C)
+ host_drv_type |= SD_DRIVER_TYPE_C;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_D)
+ host_drv_type |= SD_DRIVER_TYPE_D;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
+ card_drv_type |= SD_DRIVER_TYPE_A;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
+ card_drv_type |= SD_DRIVER_TYPE_C;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D)
+ card_drv_type |= SD_DRIVER_TYPE_D;
- err = mmc_sd_switch(card, 1, 2, card_drv_type, status);
+ /*
+ * The drive strength that the hardware can support
+ * depends on the board design. Pass the appropriate
+ * information and let the hardware specific code
+ * return what is possible given the options
+ */
+ mmc_host_clk_hold(card->host);
+ drive_strength = card->host->ops->select_drive_strength(
+ card->sw_caps.uhs_max_dtr,
+ host_drv_type, card_drv_type);
+ mmc_host_clk_release(card->host);
+
+ err = mmc_sd_switch(card, 1, 2, drive_strength, status);
if (err)
return err;
- if ((status[15] & 0xF) != card_drv_type) {
- printk(KERN_WARNING "%s: Problem setting driver strength!\n",
+ if ((status[15] & 0xF) != drive_strength) {
+ printk(KERN_WARNING "%s: Problem setting drive strength!\n",
mmc_hostname(card->host));
return 0;
}
- mmc_set_driver_type(card->host, host_drv_type);
+ mmc_set_driver_type(card->host, drive_strength);
return 0;
}
@@ -624,8 +636,12 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
goto out;
/* SPI mode doesn't define CMD19 */
- if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning)
- err = card->host->ops->execute_tuning(card->host);
+ if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning) {
+ mmc_host_clk_hold(card->host);
+ err = card->host->ops->execute_tuning(card->host,
+ MMC_SEND_TUNING_BLOCK);
+ mmc_host_clk_release(card->host);
+ }
out:
kfree(status);
@@ -764,6 +780,9 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
bool reinit)
{
int err;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ int retries;
+#endif
if (!reinit) {
/*
@@ -790,7 +809,26 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
/*
* Fetch switch information from card.
*/
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ for (retries = 1; retries <= 3; retries++) {
+ err = mmc_read_switch(card);
+ if (!err) {
+ if (retries > 1) {
+ printk(KERN_WARNING
+ "%s: recovered\n",
+ mmc_hostname(host));
+ }
+ break;
+ } else {
+ printk(KERN_WARNING
+ "%s: read switch failed (attempt %d)\n",
+ mmc_hostname(host), retries);
+ }
+ }
+#else
err = mmc_read_switch(card);
+#endif
+
if (err)
return err;
}
@@ -813,8 +851,11 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
if (!reinit) {
int ro = -1;
- if (host->ops->get_ro)
+ if (host->ops->get_ro) {
+ mmc_host_clk_hold(card->host);
ro = host->ops->get_ro(host);
+ mmc_host_clk_release(card->host);
+ }
if (ro < 0) {
printk(KERN_WARNING "%s: host does not "
@@ -926,14 +967,17 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
goto free_card;
/* Card is an ultra-high-speed card */
- mmc_sd_card_set_uhs(card);
+ mmc_card_set_uhs(card);
/*
* Since initialization is now complete, enable preset
* value registers for UHS-I cards.
*/
- if (host->ops->enable_preset_value)
+ if (host->ops->enable_preset_value) {
+ mmc_host_clk_hold(card->host);
host->ops->enable_preset_value(host, true);
+ mmc_host_clk_release(card->host);
+ }
} else {
/*
* Attempt to change to high-speed (if supported)
@@ -985,22 +1029,48 @@ static void mmc_sd_remove(struct mmc_host *host)
}
/*
+ * Card detection - card is alive.
+ */
+static int mmc_sd_alive(struct mmc_host *host)
+{
+ return mmc_send_status(host->card, NULL);
+}
+
+/*
* Card detection callback from host.
*/
static void mmc_sd_detect(struct mmc_host *host)
{
- int err;
+ int err = 0;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ int retries = 5;
+#endif
BUG_ON(!host);
BUG_ON(!host->card);
-
+
mmc_claim_host(host);
/*
* Just check if our card has been removed.
*/
- err = mmc_send_status(host->card, NULL);
-
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ while(retries) {
+ err = _mmc_detect_card_removed(host);
+ if (err) {
+ retries--;
+ udelay(5);
+ continue;
+ }
+ break;
+ }
+ if (!retries) {
+ printk(KERN_ERR "%s(%s): Unable to re-detect card (%d)\n",
+ __func__, mmc_hostname(host), err);
+ }
+#else
+ err = _mmc_detect_card_removed(host);
+#endif
mmc_release_host(host);
if (err) {
@@ -1039,12 +1109,31 @@ static int mmc_sd_suspend(struct mmc_host *host)
static int mmc_sd_resume(struct mmc_host *host)
{
int err;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ int retries;
+#endif
BUG_ON(!host);
BUG_ON(!host->card);
mmc_claim_host(host);
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ retries = 5;
+ while (retries) {
+ err = mmc_sd_init_card(host, host->ocr, host->card);
+
+ if (err) {
+ printk(KERN_ERR "%s: Re-init card rc = %d (retries = %d)\n",
+ mmc_hostname(host), err, retries);
+ mdelay(5);
+ retries--;
+ continue;
+ }
+ break;
+ }
+#else
err = mmc_sd_init_card(host, host->ocr, host->card);
+#endif
mmc_release_host(host);
return err;
@@ -1068,6 +1157,7 @@ static const struct mmc_bus_ops mmc_sd_ops = {
.suspend = NULL,
.resume = NULL,
.power_restore = mmc_sd_power_restore,
+ .alive = mmc_sd_alive,
};
static const struct mmc_bus_ops mmc_sd_ops_unsafe = {
@@ -1076,6 +1166,7 @@ static const struct mmc_bus_ops mmc_sd_ops_unsafe = {
.suspend = mmc_sd_suspend,
.resume = mmc_sd_resume,
.power_restore = mmc_sd_power_restore,
+ .alive = mmc_sd_alive,
};
static void mmc_sd_attach_bus_ops(struct mmc_host *host)
@@ -1096,6 +1187,9 @@ int mmc_attach_sd(struct mmc_host *host)
{
int err;
u32 ocr;
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ int retries;
+#endif
BUG_ON(!host);
WARN_ON(!host->claimed);
@@ -1103,15 +1197,18 @@ int mmc_attach_sd(struct mmc_host *host)
/* Make sure we are at 3.3V signalling voltage */
err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false);
if (err)
- return err;
+ goto ret_err;
/* Disable preset value enable if already set since last time */
- if (host->ops->enable_preset_value)
+ if (host->ops->enable_preset_value) {
+ mmc_host_clk_hold(host);
host->ops->enable_preset_value(host, false);
+ mmc_host_clk_release(host);
+ }
err = mmc_send_app_op_cond(host, 0, &ocr);
if (err)
- return err;
+ goto ret_err;
mmc_sd_attach_bus_ops(host);
if (host->ocr_avail_sd)
@@ -1160,9 +1257,27 @@ int mmc_attach_sd(struct mmc_host *host)
/*
* Detect and init the card.
*/
+#ifdef CONFIG_MMC_PARANOID_SD_INIT
+ retries = 5;
+ while (retries) {
+ err = mmc_sd_init_card(host, host->ocr, NULL);
+ if (err) {
+ retries--;
+ continue;
+ }
+ break;
+ }
+
+ if (!retries) {
+ printk(KERN_ERR "%s: mmc_sd_init_card() failure (err = %d)\n",
+ mmc_hostname(host), err);
+ goto err;
+ }
+#else
err = mmc_sd_init_card(host, host->ocr, NULL);
if (err)
goto err;
+#endif
mmc_release_host(host);
err = mmc_add_card(host->card);
@@ -1170,6 +1285,8 @@ int mmc_attach_sd(struct mmc_host *host)
if (err)
goto remove_card;
+ mmc_host_sd_set_init_stat(host);
+ mmc_host_sd_clear_prev_stat(host);
return 0;
remove_card:
@@ -1182,6 +1299,9 @@ err:
printk(KERN_ERR "%s: error %d whilst initialising SD card\n",
mmc_hostname(host), err);
+ret_err:
+ mmc_host_sd_clear_init_stat(host);
+ mmc_host_sd_set_prev_stat(host);
return err;
}
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index ac492ac..26e42d8 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -14,6 +14,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
@@ -27,6 +28,10 @@
#include "sdio_ops.h"
#include "sdio_cis.h"
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/sdio_ids.h>
+#endif
+
static int sdio_read_fbr(struct sdio_func *func)
{
int ret;
@@ -97,11 +102,13 @@ fail:
return ret;
}
-static int sdio_read_cccr(struct mmc_card *card)
+static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
{
int ret;
int cccr_vsn;
+ int uhs = ocr & R4_18V_PRESENT;
unsigned char data;
+ unsigned char speed;
memset(&card->cccr, 0, sizeof(struct sdio_cccr));
@@ -111,7 +118,7 @@ static int sdio_read_cccr(struct mmc_card *card)
cccr_vsn = data & 0x0f;
- if (cccr_vsn > SDIO_CCCR_REV_1_20) {
+ if (cccr_vsn > SDIO_CCCR_REV_3_00) {
printk(KERN_ERR "%s: unrecognised CCCR structure version %d\n",
mmc_hostname(card->host), cccr_vsn);
return -EINVAL;
@@ -140,12 +147,60 @@ static int sdio_read_cccr(struct mmc_card *card)
}
if (cccr_vsn >= SDIO_CCCR_REV_1_20) {
- ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &data);
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
if (ret)
goto out;
- if (data & SDIO_SPEED_SHS)
- card->cccr.high_speed = 1;
+ card->scr.sda_spec3 = 0;
+ card->sw_caps.sd3_bus_mode = 0;
+ card->sw_caps.sd3_drv_type = 0;
+ if (cccr_vsn >= SDIO_CCCR_REV_3_00 && uhs) {
+ card->scr.sda_spec3 = 1;
+ ret = mmc_io_rw_direct(card, 0, 0,
+ SDIO_CCCR_UHS, 0, &data);
+ if (ret)
+ goto out;
+
+ if (card->host->caps &
+ (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_DDR50)) {
+ if (data & SDIO_UHS_DDR50)
+ card->sw_caps.sd3_bus_mode
+ |= SD_MODE_UHS_DDR50;
+
+ if (data & SDIO_UHS_SDR50)
+ card->sw_caps.sd3_bus_mode
+ |= SD_MODE_UHS_SDR50;
+
+ if (data & SDIO_UHS_SDR104)
+ card->sw_caps.sd3_bus_mode
+ |= SD_MODE_UHS_SDR104;
+ }
+
+ ret = mmc_io_rw_direct(card, 0, 0,
+ SDIO_CCCR_DRIVE_STRENGTH, 0, &data);
+ if (ret)
+ goto out;
+
+ if (data & SDIO_DRIVE_SDTA)
+ card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_A;
+ if (data & SDIO_DRIVE_SDTC)
+ card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_C;
+ if (data & SDIO_DRIVE_SDTD)
+ card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_D;
+ }
+
+ /* if no uhs mode ensure we check for high speed */
+ if (!card->sw_caps.sd3_bus_mode) {
+ if (speed & SDIO_SPEED_SHS) {
+ card->cccr.high_speed = 1;
+ card->sw_caps.hs_max_dtr = 50000000;
+ } else {
+ card->cccr.high_speed = 0;
+ card->sw_caps.hs_max_dtr = 25000000;
+ }
+ }
}
out:
@@ -327,6 +382,194 @@ static unsigned mmc_sdio_get_max_clock(struct mmc_card *card)
return max_dtr;
}
+static unsigned char host_drive_to_sdio_drive(int host_strength)
+{
+ switch (host_strength) {
+ case MMC_SET_DRIVER_TYPE_A:
+ return SDIO_DTSx_SET_TYPE_A;
+ case MMC_SET_DRIVER_TYPE_B:
+ return SDIO_DTSx_SET_TYPE_B;
+ case MMC_SET_DRIVER_TYPE_C:
+ return SDIO_DTSx_SET_TYPE_C;
+ case MMC_SET_DRIVER_TYPE_D:
+ return SDIO_DTSx_SET_TYPE_D;
+ default:
+ return SDIO_DTSx_SET_TYPE_B;
+ }
+}
+
+static void sdio_select_driver_type(struct mmc_card *card)
+{
+ int host_drv_type = SD_DRIVER_TYPE_B;
+ int card_drv_type = SD_DRIVER_TYPE_B;
+ int drive_strength;
+ unsigned char card_strength;
+ int err;
+
+ /*
+ * If the host doesn't support any of the Driver Types A,C or D,
+ * or there is no board specific handler then default Driver
+ * Type B is used.
+ */
+ if (!(card->host->caps &
+ (MMC_CAP_DRIVER_TYPE_A |
+ MMC_CAP_DRIVER_TYPE_C |
+ MMC_CAP_DRIVER_TYPE_D)))
+ return;
+
+ if (!card->host->ops->select_drive_strength)
+ return;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_A)
+ host_drv_type |= SD_DRIVER_TYPE_A;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_C)
+ host_drv_type |= SD_DRIVER_TYPE_C;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_D)
+ host_drv_type |= SD_DRIVER_TYPE_D;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
+ card_drv_type |= SD_DRIVER_TYPE_A;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
+ card_drv_type |= SD_DRIVER_TYPE_C;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D)
+ card_drv_type |= SD_DRIVER_TYPE_D;
+
+ /*
+ * The drive strength that the hardware can support
+ * depends on the board design. Pass the appropriate
+ * information and let the hardware specific code
+ * return what is possible given the options
+ */
+ drive_strength = card->host->ops->select_drive_strength(
+ card->sw_caps.uhs_max_dtr,
+ host_drv_type, card_drv_type);
+
+ /* if error just use default for drive strength B */
+ err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_DRIVE_STRENGTH, 0,
+ &card_strength);
+ if (err)
+ return;
+
+ card_strength &= ~(SDIO_DRIVE_DTSx_MASK<<SDIO_DRIVE_DTSx_SHIFT);
+ card_strength |= host_drive_to_sdio_drive(drive_strength);
+
+ err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_DRIVE_STRENGTH,
+ card_strength, NULL);
+
+ /* if error default to drive strength B */
+ if (!err)
+ mmc_set_driver_type(card->host, drive_strength);
+}
+
+
+static int sdio_set_bus_speed_mode(struct mmc_card *card)
+{
+ unsigned int bus_speed, timing;
+ int err;
+ unsigned char speed;
+
+ /*
+ * If the host doesn't support any of the UHS-I modes, fallback on
+ * default speed.
+ */
+ if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
+ return 0;
+
+ bus_speed = SDIO_SPEED_SDR12;
+ timing = MMC_TIMING_UHS_SDR12;
+ if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
+ bus_speed = SDIO_SPEED_SDR104;
+ timing = MMC_TIMING_UHS_SDR104;
+ card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+ } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
+ bus_speed = SDIO_SPEED_DDR50;
+ timing = MMC_TIMING_UHS_DDR50;
+ card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+ } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
+ SD_MODE_UHS_SDR50)) {
+ bus_speed = SDIO_SPEED_SDR50;
+ timing = MMC_TIMING_UHS_SDR50;
+ card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+ } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
+ bus_speed = SDIO_SPEED_SDR25;
+ timing = MMC_TIMING_UHS_SDR25;
+ card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+ } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
+ SD_MODE_UHS_SDR12)) {
+ bus_speed = SDIO_SPEED_SDR12;
+ timing = MMC_TIMING_UHS_SDR12;
+ card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+ }
+
+ err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
+ if (err)
+ return err;
+
+ speed &= ~SDIO_SPEED_BSS_MASK;
+ speed |= bus_speed;
+ err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
+ if (err)
+ return err;
+
+ if (bus_speed) {
+ mmc_set_timing(card->host, timing);
+ mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
+ }
+
+ return 0;
+}
+
+/*
+ * UHS-I specific initialization procedure
+ */
+static int mmc_sdio_init_uhs_card(struct mmc_card *card)
+{
+ int err;
+
+ if (!card->scr.sda_spec3)
+ return 0;
+
+ /*
+ * Switch to wider bus (if supported).
+ */
+ if (card->host->caps & MMC_CAP_4_BIT_DATA) {
+ err = sdio_enable_4bit_bus(card);
+ if (err > 0) {
+ mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
+ err = 0;
+ }
+ }
+
+ /* Set the driver strength for the card */
+ sdio_select_driver_type(card);
+
+ /* Set bus speed mode of the card */
+ err = sdio_set_bus_speed_mode(card);
+ if (err)
+ goto out;
+
+ /* Initialize and start re-tuning timer */
+ if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning)
+ err = card->host->ops->execute_tuning(card->host,
+ MMC_SEND_TUNING_BLOCK);
+
+out:
+
+ return err;
+}
+
/*
* Handle the detection and initialisation of a card.
*
@@ -394,6 +637,30 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
host->ops->init_card(host, card);
/*
+ * If the host and card support UHS-I mode request the card
+ * to switch to 1.8V signaling level. No 1.8v signalling if
+	 * UHS mode is not enabled to maintain compatibility and some
+ * systems that claim 1.8v signalling in fact do not support
+ * it.
+ */
+ if ((ocr & R4_18V_PRESENT) &&
+ (host->caps &
+ (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_DDR50))) {
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
+ true);
+ if (err) {
+ ocr &= ~R4_18V_PRESENT;
+ host->ocr &= ~R4_18V_PRESENT;
+ }
+ err = 0;
+ } else {
+ ocr &= ~R4_18V_PRESENT;
+ host->ocr &= ~R4_18V_PRESENT;
+ }
+
+ /*
* For native busses: set card RCA and quit open drain mode.
*/
if (!powered_resume && !mmc_host_is_spi(host)) {
@@ -449,19 +716,35 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
goto finish;
}
- /*
- * Read the common registers.
- */
- err = sdio_read_cccr(card);
- if (err)
- goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.cccr)
+ memcpy(&card->cccr, host->embedded_sdio_data.cccr, sizeof(struct sdio_cccr));
+ else {
+#endif
+ /*
+ * Read the common registers.
+ */
+ err = sdio_read_cccr(card, ocr);
+ if (err)
+ goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ }
+#endif
- /*
- * Read the common CIS tuples.
- */
- err = sdio_read_common_cis(card);
- if (err)
- goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.cis)
+ memcpy(&card->cis, host->embedded_sdio_data.cis, sizeof(struct sdio_cis));
+ else {
+#endif
+ /*
+ * Read the common CIS tuples.
+ */
+ err = sdio_read_common_cis(card);
+ if (err)
+ goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ }
+#endif
if (oldcard) {
int same = (card->cis.vendor == oldcard->cis.vendor &&
@@ -494,29 +777,39 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
if (err)
goto remove;
- /*
- * Switch to high-speed (if supported).
- */
- err = sdio_enable_hs(card);
- if (err > 0)
- mmc_sd_go_highspeed(card);
- else if (err)
- goto remove;
+ /* Initialization sequence for UHS-I cards */
+ /* Only if card supports 1.8v and UHS signaling */
+ if ((ocr & R4_18V_PRESENT) && card->sw_caps.sd3_bus_mode) {
+ err = mmc_sdio_init_uhs_card(card);
+ if (err)
+ goto remove;
- /*
- * Change to the card's maximum speed.
- */
- mmc_set_clock(host, mmc_sdio_get_max_clock(card));
+ /* Card is an ultra-high-speed card */
+ mmc_card_set_uhs(card);
+ } else {
+ /*
+ * Switch to high-speed (if supported).
+ */
+ err = sdio_enable_hs(card);
+ if (err > 0)
+ mmc_sd_go_highspeed(card);
+ else if (err)
+ goto remove;
- /*
- * Switch to wider bus (if supported).
- */
- err = sdio_enable_4bit_bus(card);
- if (err > 0)
- mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
- else if (err)
- goto remove;
+ /*
+ * Change to the card's maximum speed.
+ */
+ mmc_set_clock(host, mmc_sdio_get_max_clock(card));
+ /*
+ * Switch to wider bus (if supported).
+ */
+ err = sdio_enable_4bit_bus(card);
+ if (err > 0)
+ mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
+ else if (err)
+ goto remove;
+ }
finish:
if (!oldcard)
host->card = card;
@@ -552,6 +845,14 @@ static void mmc_sdio_remove(struct mmc_host *host)
}
/*
+ * Card detection - card is alive.
+ */
+static int mmc_sdio_alive(struct mmc_host *host)
+{
+ return mmc_select_card(host->card);
+}
+
+/*
* Card detection callback from host.
*/
static void mmc_sdio_detect(struct mmc_host *host)
@@ -573,7 +874,7 @@ static void mmc_sdio_detect(struct mmc_host *host)
/*
* Just check if our card has been removed.
*/
- err = mmc_select_card(host->card);
+ err = _mmc_detect_card_removed(host);
mmc_release_host(host);
@@ -632,11 +933,14 @@ static int mmc_sdio_suspend(struct mmc_host *host)
}
}
+#ifdef CONFIG_MACH_PX
+#else
if (!err && mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {
mmc_claim_host(host);
sdio_disable_wide(host->card);
mmc_release_host(host);
}
+#endif
return err;
}
@@ -728,7 +1032,7 @@ static int mmc_sdio_power_restore(struct mmc_host *host)
if (host->ocr_avail_sdio)
host->ocr_avail = host->ocr_avail_sdio;
- host->ocr = mmc_select_voltage(host, ocr & ~0x7F);
+ host->ocr = mmc_select_voltage(host, ocr & ~0xFF);
if (!host->ocr) {
ret = -EINVAL;
goto out;
@@ -751,9 +1055,12 @@ static const struct mmc_bus_ops mmc_sdio_ops = {
.suspend = mmc_sdio_suspend,
.resume = mmc_sdio_resume,
.power_restore = mmc_sdio_power_restore,
+ .alive = mmc_sdio_alive,
};
-
+#if defined(CONFIG_MACH_M0) && defined(CONFIG_TARGET_LOCALE_EUR)
+extern void print_epll_con0(void);
+#endif
/*
* Starting point for SDIO card init.
*/
@@ -770,6 +1077,11 @@ int mmc_attach_sdio(struct mmc_host *host)
if (err)
return err;
+#if defined(CONFIG_MACH_M0) && defined(CONFIG_TARGET_LOCALE_EUR)
+ /* a sdio module is detected. print EPLL */
+ print_epll_con0();
+#endif
+
mmc_attach_bus(host, &mmc_sdio_ops);
if (host->ocr_avail_sdio)
host->ocr_avail = host->ocr_avail_sdio;
@@ -778,11 +1090,11 @@ int mmc_attach_sdio(struct mmc_host *host)
* Sanity check the voltages that the card claims to
* support.
*/
- if (ocr & 0x7F) {
+ if (ocr & 0xFF) {
printk(KERN_WARNING "%s: card claims to support voltages "
"below the defined range. These will be ignored.\n",
mmc_hostname(host));
- ocr &= ~0x7F;
+ ocr &= ~0xFF;
}
host->ocr = mmc_select_voltage(host, ocr);
@@ -799,8 +1111,17 @@ int mmc_attach_sdio(struct mmc_host *host)
* Detect and init the card.
*/
err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
- if (err)
- goto err;
+ if (err) {
+ if (err == -EAGAIN) {
+ /*
+ * Retry initialization with S18R set to 0.
+ */
+ host->ocr &= ~R4_18V_PRESENT;
+ err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
+ }
+ if (err)
+ goto err;
+ }
card = host->card;
/*
@@ -827,14 +1148,36 @@ int mmc_attach_sdio(struct mmc_host *host)
funcs = (ocr & 0x70000000) >> 28;
card->sdio_funcs = 0;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.funcs)
+ card->sdio_funcs = funcs = host->embedded_sdio_data.num_funcs;
+#endif
+
/*
* Initialize (but don't add) all present functions.
*/
for (i = 0; i < funcs; i++, card->sdio_funcs++) {
- err = sdio_init_func(host->card, i + 1);
- if (err)
- goto remove;
-
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ if (host->embedded_sdio_data.funcs) {
+ struct sdio_func *tmp;
+
+ tmp = sdio_alloc_func(host->card);
+ if (IS_ERR(tmp))
+ goto remove;
+ tmp->num = (i + 1);
+ card->sdio_func[i] = tmp;
+ tmp->class = host->embedded_sdio_data.funcs[i].f_class;
+ tmp->max_blksize = host->embedded_sdio_data.funcs[i].f_maxblksize;
+ tmp->vendor = card->cis.vendor;
+ tmp->device = card->cis.device;
+ } else {
+#endif
+ err = sdio_init_func(host->card, i + 1);
+ if (err)
+ goto remove;
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ }
+#endif
/*
* Enable Runtime PM for this func (if supported)
*/
@@ -882,3 +1225,84 @@ err:
return err;
}
+int sdio_reset_comm(struct mmc_card *card)
+{
+ struct mmc_host *host = card->host;
+ u32 ocr;
+ int err;
+
+ printk("%s():\n", __func__);
+ mmc_claim_host(host);
+
+ mmc_go_idle(host);
+
+ mmc_set_clock(host, host->f_min);
+
+ err = mmc_send_io_op_cond(host, 0, &ocr);
+ if (err)
+ goto err;
+
+ if (ocr & 0xFF) {
+ printk(KERN_WARNING "%s: card claims to support voltages "
+ "below the defined range. These will be ignored.\n",
+ mmc_hostname(host));
+ ocr &= ~0xFF;
+ }
+
+ host->ocr = mmc_select_voltage(host, ocr);
+ if (!host->ocr) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ err = mmc_send_io_op_cond(host, host->ocr, &ocr);
+ if (err)
+ goto err;
+
+ if (mmc_host_is_spi(host)) {
+ err = mmc_spi_set_crc(host, use_spi_crc);
+ if (err)
+ goto err;
+ }
+
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_send_relative_addr(host, &card->rca);
+ if (err)
+ goto err;
+ mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
+ }
+ if (!mmc_host_is_spi(host)) {
+ err = mmc_select_card(card);
+ if (err)
+ goto err;
+ }
+
+ /*
+ * Switch to high-speed (if supported).
+ */
+ err = sdio_enable_hs(card);
+ if (err > 0)
+ mmc_sd_go_highspeed(card);
+ else if (err)
+ goto err;
+
+ /*
+ * Change to the card's maximum speed.
+ */
+ mmc_set_clock(host, mmc_sdio_get_max_clock(card));
+
+ err = sdio_enable_4bit_bus(card);
+ if (err > 0)
+ mmc_set_bus_width(host, MMC_BUS_WIDTH_4);
+ else if (err)
+ goto err;
+
+ mmc_release_host(host);
+ return 0;
+err:
+ printk("%s: Error resetting SDIO communications (%d)\n",
+ mmc_hostname(host), err);
+ mmc_release_host(host);
+ return err;
+}
+EXPORT_SYMBOL(sdio_reset_comm);
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index d2565df..52429a9 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -23,6 +23,10 @@
#include "sdio_cis.h"
#include "sdio_bus.h"
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+#include <linux/mmc/host.h>
+#endif
+
/* show configuration fields */
#define sdio_config_attr(field, format_string) \
static ssize_t \
@@ -260,7 +264,14 @@ static void sdio_release_func(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
- sdio_free_func_cis(func);
+#ifdef CONFIG_MMC_EMBEDDED_SDIO
+ /*
+ * If this device is embedded then we never allocated
+ * cis tables for this func
+ */
+ if (!func->card->host->embedded_sdio_data.funcs)
+#endif
+ sdio_free_func_cis(func);
if (func->info)
kfree(func->info);
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index 0f687cd..3169452 100644..100755
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -383,6 +383,39 @@ u8 sdio_readb(struct sdio_func *func, unsigned int addr, int *err_ret)
EXPORT_SYMBOL_GPL(sdio_readb);
/**
+ * sdio_readb_ext - read a single byte from a SDIO function
+ * @func: SDIO function to access
+ * @addr: address to read
+ * @err_ret: optional status value from transfer
+ * @in: value to add to argument
+ *
+ * Reads a single byte from the address space of a given SDIO
+ * function. If there is a problem reading the address, 0xff
+ * is returned and @err_ret will contain the error code.
+ */
+unsigned char sdio_readb_ext(struct sdio_func *func, unsigned int addr,
+ int *err_ret, unsigned in)
+{
+ int ret;
+ unsigned char val;
+
+ BUG_ON(!func);
+
+ if (err_ret)
+ *err_ret = 0;
+
+ ret = mmc_io_rw_direct(func->card, 0, func->num, addr, (u8)in, &val);
+ if (ret) {
+ if (err_ret)
+ *err_ret = ret;
+ return 0xFF;
+ }
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(sdio_readb_ext);
+
+/**
* sdio_writeb - write a single byte to a SDIO function
* @func: SDIO function to access
* @b: byte to write
@@ -711,8 +744,11 @@ int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags)
host = func->card->host;
+#ifdef CONFIG_MACH_PX
+#else
if (flags & ~host->pm_caps)
return -EINVAL;
+#endif
/* function suspend methods are serialized, hence no lock needed */
host->pm_flags |= flags;
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 03ead02..8b250c12 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -145,15 +145,21 @@ static int sdio_irq_thread(void *_host)
}
set_current_state(TASK_INTERRUPTIBLE);
- if (host->caps & MMC_CAP_SDIO_IRQ)
+ if (host->caps & MMC_CAP_SDIO_IRQ) {
+ mmc_host_clk_hold(host);
host->ops->enable_sdio_irq(host, 1);
+ mmc_host_clk_release(host);
+ }
if (!kthread_should_stop())
schedule_timeout(period);
set_current_state(TASK_RUNNING);
} while (!kthread_should_stop());
- if (host->caps & MMC_CAP_SDIO_IRQ)
+ if (host->caps & MMC_CAP_SDIO_IRQ) {
+ mmc_host_clk_hold(host);
host->ops->enable_sdio_irq(host, 0);
+ mmc_host_clk_release(host);
+ }
pr_debug("%s: IRQ thread exiting with code %d\n",
mmc_hostname(host), ret);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 56dbf3f..e231dcc 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -24,6 +24,36 @@ config MMC_PXA
If unsure, say N.
+config MMC_MSHCI
+ tristate "Mobile Storage Host Controller Interface support"
+ depends on HAS_DMA
+ help
+ This selects the Mobile Storage Host Controller Interface.
+	  It is made by Synopsys. It supports SD/MMC cards.
+
+ If you have a controller with this interface, say Y or M here. You
+ also need to enable an appropriate bus interface.
+
+ If unsure, say N.
+
+config MMC_MSHCI_S3C_DMA_MAP
+ tristate "Use own S3C_DMA_MAP function for mshci"
+ depends on MMC_MSHCI
+ help
+ This selects using the s3c_dma_map_sg, s3c_unmap_sg functions.
+ Those functions are optimized for flushing cache.
+
+ If unsure, say N.
+
+config MMC_MSHCI_ASYNC_OPS
+	tristate "Use Async ops like pre_req, post_req"
+ depends on MMC_MSHCI
+ help
+ This selects using the pre_req and post_req functions.
+	  These functions may improve MMC performance.
+
+ If unsure, say N.
+
config MMC_SDHCI
tristate "Secure Digital Host Controller Interface support"
depends on HAS_DMA
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 58a5cf7..7e8ec52 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -7,6 +7,10 @@ obj-$(CONFIG_MMC_PXA) += pxamci.o
obj-$(CONFIG_MMC_IMX) += imxmmc.o
obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
+obj-$(CONFIG_MMC_DW) += dw_mmc.o
+obj-$(CONFIG_MMC_MSHCI) += mshci.o
+obj-$(CONFIG_MMC_MSHCI) += mshci-s3c.o
+obj-$(CONFIG_MMC_MSHCI_S3C_DMA_MAP) += mshci-s3c-dma.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
obj-$(CONFIG_MMC_SDHCI_PXA) += sdhci-pxa.o
@@ -38,7 +42,6 @@ obj-$(CONFIG_MMC_SDHI) += sh_mobile_sdhi.o
obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
-obj-$(CONFIG_MMC_DW) += dw_mmc.o
obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
obj-$(CONFIG_MMC_VUB300) += vub300.o
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 66dcddb..0305a70 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -34,6 +34,10 @@
#include <linux/bitops.h>
#include <linux/regulator/consumer.h>
+#include <plat/cpu.h>
+
+#include <mach/board_rev.h>
+
#include "dw_mmc.h"
/* Common flag combinations */
@@ -46,7 +50,15 @@
DW_MCI_CMD_ERROR_FLAGS | SDMMC_INT_HLE)
#define DW_MCI_SEND_STATUS 1
#define DW_MCI_RECV_STATUS 2
-#define DW_MCI_DMA_THRESHOLD 16
+#define DW_MCI_DMA_THRESHOLD 4
+
+/* Increasing sg_list size for eMMC 4.5 performance by increasing
+ max DMA Transfer size from 1MB to 4MB */
+#if defined(CONFIG_MACH_P10)
+#define SG_LIST_ALLOC_SIZE (PAGE_SIZE * 4)
+#else
+#define SG_LIST_ALLOC_SIZE PAGE_SIZE
+#endif
#ifdef CONFIG_MMC_DW_IDMAC
struct idmac_desc {
@@ -61,7 +73,7 @@ struct idmac_desc {
u32 des1; /* Buffer sizes */
#define IDMAC_SET_BUFFER1_SIZE(d, s) \
- ((d)->des1 = ((d)->des1 & 0x03ffc000) | ((s) & 0x3fff))
+ ((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff))
u32 des2; /* buffer 1 physical address */
@@ -100,6 +112,27 @@ struct dw_mci_slot {
int last_detect_state;
};
+#define MAX_TUING_LOOP 40
+
+static const u8 tuning_blk_pattern[] = {
+ 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
+ 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
+ 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
+ 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
+ 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
+ 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
+ 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
+ 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
+ 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
+ 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
+ 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
+ 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
+ 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
+ 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
+ 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
+ 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
+};
+
#if defined(CONFIG_DEBUG_FS)
static int dw_mci_req_show(struct seq_file *s, void *v)
{
@@ -221,6 +254,54 @@ err:
}
#endif /* defined(CONFIG_DEBUG_FS) */
+static void dw_mci_clear_set_irqs(struct dw_mci *host, u32 clear, u32 set)
+{
+ u32 ier;
+
+ /* clear interrupt */
+ mci_writel(host, RINTSTS, clear);
+
+ ier = mci_readl(host, INTMASK);
+
+ ier &= ~clear;
+ ier |= set;
+
+ mci_writel(host, INTMASK, ier);
+}
+
+static void dw_mci_unmask_irqs(struct dw_mci *host, u32 irqs)
+{
+ dw_mci_clear_set_irqs(host, 0, irqs);
+}
+
+static void dw_mci_mask_irqs(struct dw_mci *host, u32 irqs)
+{
+ dw_mci_clear_set_irqs(host, irqs, 0);
+}
+
+static void dw_mci_set_card_detection(struct dw_mci *host, bool enable)
+{
+ u32 irqs = SDMMC_INT_CD;
+
+ if (host->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
+ return;
+
+ if (enable)
+ dw_mci_unmask_irqs(host, irqs);
+ else
+ dw_mci_mask_irqs(host, irqs);
+}
+
+static void dw_mci_enable_card_detection(struct dw_mci *host)
+{
+ dw_mci_set_card_detection(host, true);
+}
+
+static void dw_mci_disable_card_detection(struct dw_mci *host)
+{
+ dw_mci_set_card_detection(host, false);
+}
+
static void dw_mci_set_timeout(struct dw_mci *host)
{
/* timeout (maximum) */
@@ -230,6 +311,7 @@ static void dw_mci_set_timeout(struct dw_mci *host)
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
struct mmc_data *data;
+ struct dw_mci_slot *slot = mmc_priv(mmc);
u32 cmdr;
cmd->error = -EINPROGRESS;
@@ -259,6 +341,10 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
cmdr |= SDMMC_CMD_DAT_WR;
}
+ /* Use hold bit register */
+ if (slot->host->pdata->set_io_timing)
+ cmdr |= SDMMC_USE_HOLD_REG;
+
return cmdr;
}
@@ -284,7 +370,7 @@ static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data)
/* DMA interface functions */
static void dw_mci_stop_dma(struct dw_mci *host)
{
- if (host->use_dma) {
+ if (host->using_dma) {
host->dma_ops->stop(host);
host->dma_ops->cleanup(host);
} else {
@@ -299,9 +385,10 @@ static void dw_mci_dma_cleanup(struct dw_mci *host)
struct mmc_data *data = host->data;
if (data)
- dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
- ((data->flags & MMC_DATA_WRITE)
- ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+ if (!data->host_cookie)
+ dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len,
+ ((data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
}
static void dw_mci_idmac_stop_dma(struct dw_mci *host)
@@ -398,7 +485,7 @@ static int dw_mci_idmac_init(struct dw_mci *host)
int i;
/* Number of descriptors in the ring buffer */
- host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
+ host->ring_size = host->buf_size / sizeof(struct idmac_desc);
/* Forward link the descriptor list */
for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
@@ -417,24 +504,15 @@ static int dw_mci_idmac_init(struct dw_mci *host)
return 0;
}
-static struct dw_mci_dma_ops dw_mci_idmac_ops = {
- .init = dw_mci_idmac_init,
- .start = dw_mci_idmac_start_dma,
- .stop = dw_mci_idmac_stop_dma,
- .complete = dw_mci_idmac_complete_dma,
- .cleanup = dw_mci_dma_cleanup,
-};
-#endif /* CONFIG_MMC_DW_IDMAC */
-
-static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
+static int dw_mci_pre_dma_transfer(struct dw_mci *host,
+ struct mmc_data *data,
+ int next)
{
struct scatterlist *sg;
- unsigned int i, direction, sg_len;
- u32 temp;
+ int i, sg_len;
- /* If we don't have a channel, we can't do DMA */
- if (!host->use_dma)
- return -ENODEV;
+ if (!next && data->host_cookie)
+ return data->host_cookie;
/*
* We don't do DMA on "complex" transfers, i.e. with
@@ -443,6 +521,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
*/
if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD)
return -EINVAL;
+
if (data->blksz & 3)
return -EINVAL;
@@ -451,13 +530,95 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
return -EINVAL;
}
- if (data->flags & MMC_DATA_READ)
- direction = DMA_FROM_DEVICE;
- else
- direction = DMA_TO_DEVICE;
+ sg_len = dma_map_sg(&host->pdev->dev, data->sg,
+ data->sg_len, ((data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+ if (sg_len == 0)
+ return -EINVAL;
+
+ if (next)
+ data->host_cookie = sg_len;
- sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len,
- direction);
+ return sg_len;
+}
+
+static void dw_mci_pre_req(struct mmc_host *mmc,
+ struct mmc_request *mrq,
+ bool is_first_req)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (!data)
+ return;
+
+ if (data->host_cookie) {
+ data->host_cookie = 0;
+ return;
+ }
+
+ if (slot->host->use_dma) {
+ if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0)
+ data->host_cookie = 0;
+ }
+}
+
+static void dw_mci_post_req(struct mmc_host *mmc,
+ struct mmc_request *mrq,
+ int err)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct mmc_data *data = mrq->data;
+
+ if (!data)
+ return;
+
+ if (slot->host->use_dma) {
+ if (data->host_cookie)
+ dma_unmap_sg(&slot->host->pdev->dev, data->sg,
+ data->sg_len,
+ ((data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE : DMA_FROM_DEVICE));
+ data->host_cookie = 0;
+ }
+}
+
+static struct dw_mci_dma_ops dw_mci_idmac_ops = {
+ .init = dw_mci_idmac_init,
+ .start = dw_mci_idmac_start_dma,
+ .stop = dw_mci_idmac_stop_dma,
+ .complete = dw_mci_idmac_complete_dma,
+ .cleanup = dw_mci_dma_cleanup,
+};
+#else
+static int dw_mci_pre_dma_transfer(struct dw_mci *host,
+ struct mmc_data *data,
+ bool next)
+{
+ return -ENOSYS;
+}
+#define dw_mci_pre_req NULL
+#define dw_mci_post_req NULL
+#endif /* CONFIG_MMC_DW_IDMAC */
+
+static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
+{
+ int sg_len;
+ u32 temp;
+
+ host->using_dma = 0;
+
+ /* If we don't have a channel, we can't do DMA */
+ if (!host->use_dma)
+ return -ENODEV;
+
+ sg_len = dw_mci_pre_dma_transfer(host, data, 0);
+ if (sg_len < 0) {
+ host->dma_ops->stop(host);
+ return sg_len;
+ }
+
+ host->using_dma = 1;
dev_vdbg(&host->pdev->dev,
"sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n",
@@ -470,6 +631,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data)
mci_writel(host, CTRL, temp);
/* Disable RX/TX IRQs, let DMA handle it */
+ mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
temp = mci_readl(host, INTMASK);
temp &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR);
mci_writel(host, INTMASK, temp);
@@ -490,13 +652,20 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
host->data = data;
if (dw_mci_submit_data_dma(host, data)) {
+ int flags = SG_MITER_ATOMIC;
+ if (host->data->flags & MMC_DATA_READ)
+ flags |= SG_MITER_TO_SG;
+ else
+ flags |= SG_MITER_FROM_SG;
+
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
host->sg = data->sg;
- host->pio_offset = 0;
if (data->flags & MMC_DATA_READ)
host->dir_status = DW_MCI_RECV_STATUS;
else
host->dir_status = DW_MCI_SEND_STATUS;
+ mci_writel(host, RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR);
temp = mci_readl(host, INTMASK);
temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR;
mci_writel(host, INTMASK, temp);
@@ -574,17 +743,17 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
}
/* Set the current slot bus width */
- mci_writel(host, CTYPE, slot->ctype);
+ mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
-static void dw_mci_start_request(struct dw_mci *host,
- struct dw_mci_slot *slot)
+static void __dw_mci_start_request(struct dw_mci *host,
+ struct dw_mci_slot *slot, struct mmc_command *cmd)
{
struct mmc_request *mrq;
- struct mmc_command *cmd;
struct mmc_data *data;
u32 cmdflags;
+ host->prv_err = 0;
mrq = slot->mrq;
if (host->pdata->select_slot)
host->pdata->select_slot(slot->id);
@@ -599,14 +768,13 @@ static void dw_mci_start_request(struct dw_mci *host,
host->completed_events = 0;
host->data_status = 0;
- data = mrq->data;
+ data = cmd->data;
if (data) {
dw_mci_set_timeout(host);
mci_writel(host, BYTCNT, data->blksz*data->blocks);
mci_writel(host, BLKSIZ, data->blksz);
}
- cmd = mrq->cmd;
cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
/* this is the first command, send the initialization clock */
@@ -624,6 +792,17 @@ static void dw_mci_start_request(struct dw_mci *host,
host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
}
+static void dw_mci_start_request(struct dw_mci *host,
+ struct dw_mci_slot *slot)
+{
+ struct mmc_request *mrq = slot->mrq;
+ struct mmc_command *cmd;
+
+ cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
+ __dw_mci_start_request(host, slot, cmd);
+}
+
+/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
struct mmc_request *mrq)
{
@@ -647,15 +826,44 @@ static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
+ ktime_t expr;
+ u64 add_time = 50000; /* 50us */
+ int timeout = 100000;
WARN_ON(slot->mrq);
if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) {
mrq->cmd->error = -ENOMEDIUM;
+ host->prv_err = 1;
mmc_request_done(mmc, mrq);
return;
}
+ do {
+ if (mrq->cmd->opcode == MMC_STOP_TRANSMISSION)
+ break;
+
+ if (mci_readl(host, STATUS) & (1 << 9)) {
+ if (!timeout) {
+ printk(KERN_ERR "%s: Data0: Never released\n",
+ mmc_hostname(mmc));
+ mrq->cmd->error = -ENOTRECOVERABLE;
+ host->prv_err = 1;
+ mmc_request_done(mmc, mrq);
+ return;
+ }
+ if (host->prv_err) {
+ udelay(10);
+ } else {
+ expr = ktime_add_ns(ktime_get(), add_time);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_hrtimeout(&expr, HRTIMER_MODE_ABS);
+ }
+ timeout--;
+ } else
+ break;
+ } while(1);
+
/* We don't support multiple blocks of weird lengths. */
dw_mci_queue_request(host, slot, mrq);
}
@@ -680,12 +888,19 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
break;
}
+ regs = mci_readl(slot->host, UHS_REG);
+
/* DDR mode set */
- if (ios->ddr) {
- regs = mci_readl(slot->host, UHS_REG);
+ if (ios->timing == MMC_TIMING_UHS_DDR50)
regs |= (0x1 << slot->id) << 16;
- mci_writel(slot->host, UHS_REG, regs);
- }
+ else
+ /* 1, 4, 8 Bit SDR */
+ regs &= ~(0x1 << slot->id) << 16;
+
+ mci_writel(slot->host, UHS_REG, regs);
+
+ if (slot->host->pdata->set_io_timing)
+ slot->host->pdata->set_io_timing(slot->host, ios->timing);
if (ios->clock) {
/*
@@ -702,6 +917,9 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
default:
break;
}
+
+ if (slot->host->pdata->cfg_gpio)
+ slot->host->pdata->cfg_gpio(mmc->ios.bus_width);
}
static int dw_mci_get_ro(struct mmc_host *mmc)
@@ -746,11 +964,186 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
return present;
}
+static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci *host = slot->host;
+ u32 int_mask;
+
+ /* Enable/disable Slot Specific SDIO interrupt */
+ int_mask = mci_readl(host, INTMASK);
+ if (enb) {
+ mci_writel(host, INTMASK,
+ (int_mask | (1 << SDMMC_INT_SDIO(slot->id))));
+ } else {
+ mci_writel(host, INTMASK,
+ (int_mask & ~(1 << SDMMC_INT_SDIO(slot->id))));
+ }
+}
+
+static u8 dw_mci_tuning_sampling(struct dw_mci * host)
+{
+ u32 clksel;
+ u8 sample;
+
+ clksel = mci_readl(host, CLKSEL);
+ sample = clksel & 0x7;
+ sample = (++sample == 8) ? 0 : sample;
+ clksel = (clksel & 0xfffffff8) | (sample & 0x7);
+ mci_writel(host, CLKSEL, clksel);
+
+ return sample;
+}
+
+static void dw_mci_set_sampling(struct dw_mci * host, u8 sample)
+{
+ u32 clksel;
+
+ clksel = mci_readl(host, CLKSEL);
+ clksel = (clksel & 0xfffffff8) | (sample & 0x7);
+ mci_writel(host, CLKSEL, clksel);
+}
+
+static u8 dw_mci_get_sampling(struct dw_mci * host)
+{
+ u32 clksel;
+ u8 sample;
+
+ clksel = mci_readl(host, CLKSEL);
+ sample = clksel & 0x7;
+
+ return sample;
+}
+
+static u8 get_median_sample(u8 map)
+{
+ u8 min = 0, max = 0;
+ u8 pos;
+ u8 i;
+
+ for (i = 0; i < 4; i++) {
+ if ((map >> (4 + i)) & 0x1)
+ max = 4 + i;
+ if ((map >> (3 - i)) & 0x1)
+ min = 3 - i;
+ }
+
+ pos = max;
+ do {
+ max = pos;
+ pos = DIV_ROUND_CLOSEST(min + max, 2);
+ if ((map >> pos) & 0x1)
+ break;
+
+ } while(pos != max);
+
+ return pos;
+}
+
+static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode)
+{
+ struct dw_mci_slot *slot = mmc_priv(mmc);
+ struct dw_mci *host = slot->host;
+ unsigned int tuning_loop = MAX_TUING_LOOP;
+ u8 *tuning_blk;
+ u8 blksz;
+ u8 tune, start_tune;
+ u8 map = 0, mid;
+
+ if (opcode == MMC_SEND_TUNING_BLOCK_HS200) {
+ if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
+ blksz = 128;
+ else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
+ blksz = 64;
+ else
+ return -EINVAL;
+ } else if (opcode == MMC_SEND_TUNING_BLOCK) {
+ blksz = 64;
+ } else {
+ dev_err(&mmc->class_dev,
+ "Undefined command(%d) for tuning\n",
+ opcode);
+ return -EINVAL;
+ }
+
+ tuning_blk = kmalloc(blksz, GFP_KERNEL);
+ if (!tuning_blk)
+ return -ENOMEM;
+
+ start_tune = dw_mci_get_sampling(host);
+
+ do {
+ struct mmc_request mrq = {NULL};
+ struct mmc_command cmd = {0};
+ struct mmc_command stop = {0};
+ struct mmc_data data = {0};
+ struct scatterlist sg;
+
+ cmd.opcode = opcode;
+ cmd.arg = 0;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ stop.opcode = MMC_STOP_TRANSMISSION;
+ stop.arg = 0;
+ stop.flags = MMC_RSP_R1B | MMC_CMD_AC;
+
+ data.blksz = blksz;
+ data.blocks = 1;
+ data.flags = MMC_DATA_READ;
+ data.sg = &sg;
+ data.sg_len = 1;
+
+ sg_init_one(&sg, tuning_blk, blksz);
+ dw_mci_set_timeout(host);
+
+ mrq.cmd = &cmd;
+ mrq.stop = &stop;
+ mrq.data = &data;
+ host->mrq = &mrq;
+
+ tune = dw_mci_tuning_sampling(host);
+
+ mmc_wait_for_req(mmc, &mrq);
+
+ if (!cmd.error && !data.error) {
+ if (!memcmp(tuning_blk_pattern, tuning_blk, blksz))
+ map |= (1 << tune);
+ } else {
+ dev_dbg(&mmc->class_dev,
+ "Tuning error: cmd.error:%d, data.error:%d\n",
+ cmd.error, data.error);
+ }
+
+ if (start_tune == tune) {
+ if (!map) {
+ tuning_loop = 0;
+ break;
+ }
+
+ mid = get_median_sample(map);
+ dw_mci_set_sampling(host, mid);
+ break;
+ }
+
+ } while(--tuning_loop);
+
+ kfree(tuning_blk);
+
+ if (!tuning_loop)
+ return -EIO;
+
+ return 0;
+}
+
static const struct mmc_host_ops dw_mci_ops = {
- .request = dw_mci_request,
- .set_ios = dw_mci_set_ios,
- .get_ro = dw_mci_get_ro,
- .get_cd = dw_mci_get_cd,
+ .request = dw_mci_request,
+ .pre_req = dw_mci_pre_req,
+ .post_req = dw_mci_post_req,
+ .set_ios = dw_mci_set_ios,
+ .get_ro = dw_mci_get_ro,
+ .get_cd = dw_mci_get_cd,
+ .enable_sdio_irq = dw_mci_enable_sdio_irq,
+ .execute_tuning = dw_mci_execute_tuning,
};
static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq)
@@ -821,6 +1214,7 @@ static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd
host->data = NULL;
dw_mci_stop_dma(host);
}
+ host->prv_err = 1;
}
}
@@ -853,7 +1247,13 @@ static void dw_mci_tasklet_func(unsigned long priv)
cmd = host->cmd;
host->cmd = NULL;
set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
- dw_mci_command_complete(host, host->mrq->cmd);
+ dw_mci_command_complete(host, cmd);
+ if ((cmd == host->mrq->sbc) && !cmd->error) {
+ prev_state = state = STATE_SENDING_CMD;
+ __dw_mci_start_request(host, host->cur_slot, host->mrq->cmd);
+ goto unlock;
+ }
+
if (!host->mrq->data || cmd->error) {
dw_mci_request_end(host, host->mrq);
goto unlock;
@@ -905,6 +1305,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
status);
data->error = -EIO;
}
+ host->prv_err = 1;
} else {
data->bytes_xfered = data->blocks * data->blksz;
data->error = 0;
@@ -915,6 +1316,12 @@ static void dw_mci_tasklet_func(unsigned long priv)
goto unlock;
}
+ if (host->mrq->sbc && !data->error) {
+ data->stop->error = 0;
+ dw_mci_request_end(host, host->mrq);
+ goto unlock;
+ }
+
prev_state = state = STATE_SENDING_STOP;
if (!data->error)
send_stop_cmd(host, data);
@@ -954,7 +1361,7 @@ static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt)
cnt = cnt >> 1;
while (cnt > 0) {
- mci_writew(host, DATA, *pdata++);
+ mci_writew(host, DATA(host->data_offset), *pdata++);
cnt--;
}
}
@@ -967,7 +1374,7 @@ static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt)
cnt = cnt >> 1;
while (cnt > 0) {
- *pdata++ = mci_readw(host, DATA);
+ *pdata++ = mci_readw(host, DATA(host->data_offset));
cnt--;
}
}
@@ -981,7 +1388,7 @@ static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt)
cnt = cnt >> 2;
while (cnt > 0) {
- mci_writel(host, DATA, *pdata++);
+ mci_writel(host, DATA(host->data_offset), *pdata++);
cnt--;
}
}
@@ -995,7 +1402,7 @@ static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt)
cnt = cnt >> 2;
while (cnt > 0) {
- *pdata++ = mci_readl(host, DATA);
+ *pdata++ = mci_readl(host, DATA(host->data_offset));
cnt--;
}
}
@@ -1008,7 +1415,7 @@ static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt)
cnt = cnt >> 3;
while (cnt > 0) {
- mci_writeq(host, DATA, *pdata++);
+ mci_writeq(host, DATA(host->data_offset), *pdata++);
cnt--;
}
}
@@ -1021,60 +1428,49 @@ static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt)
cnt = cnt >> 3;
while (cnt > 0) {
- *pdata++ = mci_readq(host, DATA);
+ *pdata++ = mci_readq(host, DATA(host->data_offset));
cnt--;
}
}
static void dw_mci_read_data_pio(struct dw_mci *host)
{
- struct scatterlist *sg = host->sg;
- void *buf = sg_virt(sg);
- unsigned int offset = host->pio_offset;
+ struct sg_mapping_iter *sg_miter = &host->sg_miter;
+ void *buf;
+ unsigned int offset;
struct mmc_data *data = host->data;
int shift = host->data_shift;
u32 status;
unsigned int nbytes = 0, len;
+ unsigned int remain, fcnt;
do {
- len = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
- if (offset + len <= sg->length) {
- host->pull_data(host, (void *)(buf + offset), len);
+ if (!sg_miter_next(sg_miter))
+ goto done;
- offset += len;
- nbytes += len;
-
- if (offset == sg->length) {
- flush_dcache_page(sg_page(sg));
- host->sg = sg = sg_next(sg);
- if (!sg)
- goto done;
+ buf = sg_miter->addr;
+ remain = sg_miter->length;
+ offset = 0;
- offset = 0;
- buf = sg_virt(sg);
- }
- } else {
- unsigned int remaining = sg->length - offset;
- host->pull_data(host, (void *)(buf + offset),
- remaining);
- nbytes += remaining;
-
- flush_dcache_page(sg_page(sg));
- host->sg = sg = sg_next(sg);
- if (!sg)
- goto done;
-
- offset = len - remaining;
- buf = sg_virt(sg);
- host->pull_data(host, buf, offset);
- nbytes += offset;
- }
+ do {
+ fcnt = SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift;
+ len = min(remain, fcnt);
+ if (!len)
+ break;
+ host->pull_data(host, (void *)(buf + offset), len);
+ nbytes += len;
+ offset += len;
+ remain -= len;
+ } while (remain);
+ sg_miter->consumed = offset;
status = mci_readl(host, MINTSTS);
mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
if (status & DW_MCI_DATA_ERROR_FLAGS) {
host->data_status = status;
data->bytes_xfered += nbytes;
+ sg_miter_stop(sg_miter);
+ host->sg = NULL;
smp_wmb();
set_bit(EVENT_DATA_ERROR, &host->pending_events);
@@ -1083,65 +1479,64 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
return;
}
} while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
- len = SDMMC_GET_FCNT(mci_readl(host, STATUS));
- host->pio_offset = offset;
data->bytes_xfered += nbytes;
+
+ if (!remain) {
+ if (!sg_miter_next(sg_miter))
+ goto done;
+ sg_miter->consumed = 0;
+ }
+ sg_miter_stop(sg_miter);
return;
done:
data->bytes_xfered += nbytes;
+ sg_miter_stop(sg_miter);
+ host->sg = NULL;
smp_wmb();
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static void dw_mci_write_data_pio(struct dw_mci *host)
{
- struct scatterlist *sg = host->sg;
- void *buf = sg_virt(sg);
- unsigned int offset = host->pio_offset;
+ struct sg_mapping_iter *sg_miter = &host->sg_miter;
+ void *buf;
+ unsigned int offset;
struct mmc_data *data = host->data;
int shift = host->data_shift;
u32 status;
unsigned int nbytes = 0, len;
+ unsigned int fifo_depth = host->fifo_depth;
+ unsigned int remain, fcnt;
do {
- len = SDMMC_FIFO_SZ -
- (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
- if (offset + len <= sg->length) {
+ if (!sg_miter_next(sg_miter))
+ goto done;
+
+ buf = sg_miter->addr;
+ remain = sg_miter->length;
+ offset = 0;
+
+ do {
+ fcnt = SDMMC_GET_FCNT(mci_readl(host, STATUS));
+ fcnt = (fifo_depth - fcnt) << shift;
+ len = min(remain, fcnt);
+ if (!len)
+ break;
host->push_data(host, (void *)(buf + offset), len);
-
- offset += len;
nbytes += len;
- if (offset == sg->length) {
- host->sg = sg = sg_next(sg);
- if (!sg)
- goto done;
-
- offset = 0;
- buf = sg_virt(sg);
- }
- } else {
- unsigned int remaining = sg->length - offset;
-
- host->push_data(host, (void *)(buf + offset),
- remaining);
- nbytes += remaining;
-
- host->sg = sg = sg_next(sg);
- if (!sg)
- goto done;
-
- offset = len - remaining;
- buf = sg_virt(sg);
- host->push_data(host, (void *)buf, offset);
- nbytes += offset;
- }
+ offset += len;
+ remain -= len;
+ } while (remain);
+ sg_miter->consumed = offset;
status = mci_readl(host, MINTSTS);
mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
if (status & DW_MCI_DATA_ERROR_FLAGS) {
host->data_status = status;
data->bytes_xfered += nbytes;
+ sg_miter_stop(sg_miter);
+ host->sg = NULL;
smp_wmb();
@@ -1151,14 +1546,20 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
return;
}
} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
-
- host->pio_offset = offset;
data->bytes_xfered += nbytes;
+ if (!remain) {
+ if (!sg_miter_next(sg_miter))
+ goto done;
+ sg_miter->consumed = 0;
+ }
+ sg_miter_stop(sg_miter);
return;
done:
data->bytes_xfered += nbytes;
+ sg_miter_stop(sg_miter);
+ host->sg = NULL;
smp_wmb();
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
@@ -1179,6 +1580,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
struct dw_mci *host = dev_id;
u32 status, pending;
unsigned int pass_count = 0;
+ int i;
do {
status = mci_readl(host, RINTSTS);
@@ -1202,7 +1604,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
host->cmd_status = status;
smp_wmb();
set_bit(EVENT_CMD_COMPLETE, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ if (!(pending & SDMMC_INT_RTO))
+ tasklet_schedule(&host->tasklet);
}
if (pending & DW_MCI_DATA_ERROR_FLAGS) {
@@ -1211,7 +1614,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
host->data_status = status;
smp_wmb();
set_bit(EVENT_DATA_ERROR, &host->pending_events);
- tasklet_schedule(&host->tasklet);
+ if (!(pending & SDMMC_INT_DTO))
+ tasklet_schedule(&host->tasklet);
}
if (pending & SDMMC_INT_DATA_OVER) {
@@ -1223,6 +1627,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
if (host->sg != NULL)
dw_mci_read_data_pio(host);
}
+
set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
tasklet_schedule(&host->tasklet);
}
@@ -1249,6 +1654,15 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
tasklet_schedule(&host->card_tasklet);
}
+ /* Handle SDIO Interrupts */
+ for (i = 0; i < host->num_slots; i++) {
+ struct dw_mci_slot *slot = host->slot[i];
+ if (pending & SDMMC_INT_SDIO(i)) {
+ mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i));
+ mmc_signal_sdio_irq(slot->mmc);
+ }
+ }
+
} while (pass_count++ < 5);
#ifdef CONFIG_MMC_DW_IDMAC
@@ -1308,6 +1722,7 @@ static void dw_mci_tasklet_card(unsigned long data)
break;
case STATE_SENDING_CMD:
mrq->cmd->error = -ENOMEDIUM;
+ host->prv_err = 1;
if (!mrq->data)
break;
/* fall through */
@@ -1331,6 +1746,7 @@ static void dw_mci_tasklet_card(unsigned long data)
} else {
list_del(&slot->queue_node);
mrq->cmd->error = -ENOMEDIUM;
+ host->prv_err = 1;
if (mrq->data)
mrq->data->error = -ENOMEDIUM;
if (mrq->stop)
@@ -1353,6 +1769,7 @@ static void dw_mci_tasklet_card(unsigned long data)
* block interrupt, hence setting the
* scatter-gather pointer to NULL.
*/
+ sg_miter_stop(&host->sg_miter);
host->sg = NULL;
ctrl = mci_readl(host, CTRL);
@@ -1376,6 +1793,34 @@ static void dw_mci_tasklet_card(unsigned long data)
}
}
+static void dw_mci_notify_change(struct platform_device *dev, int state)
+{
+ struct dw_mci *host = platform_get_drvdata(dev);
+ unsigned long flags;
+
+ if (host) {
+ spin_lock_irqsave(&host->lock, flags);
+ if (state) {
+ dev_dbg(&dev->dev, "card inserted.\n");
+ host->quirks |= DW_MCI_QUIRK_BROKEN_CARD_DETECTION;
+ } else {
+ dev_dbg(&dev->dev, "card removed.\n");
+ host->quirks &= ~DW_MCI_QUIRK_BROKEN_CARD_DETECTION;
+ }
+ tasklet_schedule(&host->card_tasklet);
+ spin_unlock_irqrestore(&host->lock, flags);
+ }
+}
+
+static irqreturn_t dw_mci_detect_interrupt(int irq, void *dev_id)
+{
+ struct dw_mci_slot *slot = dev_id;
+
+ tasklet_schedule(&slot->host->card_tasklet);
+
+ return IRQ_HANDLED;
+}
+
static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
{
struct mmc_host *mmc;
@@ -1411,20 +1856,18 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
else
mmc->caps = 0;
+ if (host->pdata->caps2)
+ mmc->caps2 = host->pdata->caps2;
+ else
+ mmc->caps2 = 0;
+
if (host->pdata->get_bus_wd)
if (host->pdata->get_bus_wd(slot->id) >= 4)
mmc->caps |= MMC_CAP_4_BIT_DATA;
if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED)
- mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+ mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
-#ifdef CONFIG_MMC_DW_IDMAC
- mmc->max_segs = host->ring_size;
- mmc->max_blk_size = 65536;
- mmc->max_blk_count = host->ring_size;
- mmc->max_seg_size = 0x1000;
- mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count;
-#else
if (host->pdata->blk_settings) {
mmc->max_segs = host->pdata->blk_settings->max_segs;
mmc->max_blk_size = host->pdata->blk_settings->max_blk_size;
@@ -1432,14 +1875,21 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
mmc->max_req_size = host->pdata->blk_settings->max_req_size;
mmc->max_seg_size = host->pdata->blk_settings->max_seg_size;
} else {
+#ifdef CONFIG_MMC_DW_IDMAC
+ mmc->max_segs = host->ring_size;
+ mmc->max_blk_size = 65536;
+ mmc->max_seg_size = 0x1000;
+ mmc->max_req_size = mmc->max_seg_size * host->ring_size;
+ mmc->max_blk_count = mmc->max_req_size / 512;
+#else
/* Useful defaults if platform data is unset. */
mmc->max_segs = 64;
mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */
mmc->max_blk_count = 512;
mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
mmc->max_seg_size = mmc->max_req_size;
- }
#endif /* CONFIG_MMC_DW_IDMAC */
+ }
host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
if (IS_ERR(host->vmmc)) {
@@ -1448,6 +1898,8 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
} else
regulator_enable(host->vmmc);
+ host->pdata->init(id, dw_mci_detect_interrupt, host);
+
if (dw_mci_get_cd(mmc))
set_bit(DW_MMC_CARD_PRESENT, &slot->flags);
else
@@ -1486,8 +1938,13 @@ static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id)
static void dw_mci_init_dma(struct dw_mci *host)
{
+ if (host->pdata->buf_size)
+ host->buf_size = host->pdata->buf_size;
+ else
+ host->buf_size = PAGE_SIZE;
+
/* Alloc memory for sg translation */
- host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, PAGE_SIZE,
+ host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, host->buf_size,
&host->sg_dma, GFP_KERNEL);
if (!host->sg_cpu) {
dev_err(&host->pdev->dev, "%s: could not alloc DMA memory\n",
@@ -1588,6 +2045,28 @@ static int dw_mci_probe(struct platform_device *pdev)
goto err_freehost;
}
+ host->hclk = clk_get(&pdev->dev, pdata->hclk_name);
+ if (IS_ERR(host->hclk)) {
+ dev_err(&pdev->dev,
+ "failed to get hclk\n");
+ ret = PTR_ERR(host->hclk);
+ goto err_freehost;
+ }
+ clk_enable(host->hclk);
+
+ host->cclk = clk_get(&pdev->dev, pdata->cclk_name);
+ if (IS_ERR(host->cclk)) {
+ dev_err(&pdev->dev,
+ "failed to get cclk\n");
+ ret = PTR_ERR(host->cclk);
+ goto err_free_hclk;
+ }
+ clk_enable(host->cclk);
+
+ if ((soc_is_exynos4412() || soc_is_exynos4212())
+ && (samsung_rev() < EXYNOS4412_REV_1_0))
+ pdata->bus_hz = 66 * 1000 * 1000;
+
host->bus_hz = pdata->bus_hz;
host->quirks = pdata->quirks;
@@ -1597,7 +2076,7 @@ static int dw_mci_probe(struct platform_device *pdev)
ret = -ENOMEM;
host->regs = ioremap(regs->start, regs->end - regs->start + 1);
if (!host->regs)
- goto err_freehost;
+ goto err_free_cclk;
host->dma_ops = pdata->dma_ops;
dw_mci_init_dma(host);
@@ -1645,8 +2124,19 @@ static int dw_mci_probe(struct platform_device *pdev)
* FIFO threshold settings RxMark = fifo_size / 2 - 1,
* Tx Mark = fifo_size / 2 DMA Size = 8
*/
- fifo_size = mci_readl(host, FIFOTH);
- fifo_size = (fifo_size >> 16) & 0x7ff;
+ if (!host->pdata->fifo_depth) {
+ /*
+ * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may
+ * have been overwritten by the bootloader, just like we're
+ * about to do, so if you know the value for your hardware, you
+ * should put it in the platform data.
+ */
+ fifo_size = mci_readl(host, FIFOTH);
+ fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
+ } else {
+ fifo_size = host->pdata->fifo_depth;
+ }
+ host->fifo_depth = fifo_size;
host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) |
((fifo_size/2) << 0));
mci_writel(host, FIFOTH, host->fifoth_val);
@@ -1680,6 +2170,24 @@ static int dw_mci_probe(struct platform_device *pdev)
}
/*
+ * In 2.40a spec, Data offset is changed.
+ * Need to check the version-id and set data-offset for DATA register.
+ */
+ host->verid = SDMMC_GET_VERID(mci_readl(host, VERID));
+ dev_info(&pdev->dev, "Version ID is %04x\n", host->verid);
+
+ if (host->verid < DW_MMC_240A)
+ host->data_offset = DATA_OFFSET;
+ else
+ host->data_offset = DATA_240A_OFFSET;
+
+ if (host->pdata->cd_type == DW_MCI_CD_EXTERNAL) {
+ host->pdata->ext_cd_init(&dw_mci_notify_change);
+ if (host->pdata->caps == MMC_CAP_UHS_SDR50 && samsung_rev() >= EXYNOS5250_REV_1_0)
+ clk_set_rate(host->cclk, 200 * 100 * 100);
+ }
+
+ /*
* Enable interrupts for command done, data over, data empty, card det,
* receive ready and error such as transmit, receive timeout, crc error
*/
@@ -1690,7 +2198,9 @@ static int dw_mci_probe(struct platform_device *pdev)
mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */
dev_info(&pdev->dev, "DW MMC controller at irq %d, "
- "%d bit host data width\n", irq, width);
+ "%d bit host data width, "
+ "%u deep fifo\n",
+ irq, width, fifo_size);
if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO)
dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n");
@@ -1708,7 +2218,7 @@ err_init_slot:
err_dmaunmap:
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
- dma_free_coherent(&host->pdev->dev, PAGE_SIZE,
+ dma_free_coherent(&host->pdev->dev, host->buf_size,
host->sg_cpu, host->sg_dma);
iounmap(host->regs);
@@ -1717,6 +2227,13 @@ err_dmaunmap:
regulator_put(host->vmmc);
}
+err_free_cclk:
+ clk_disable(host->cclk);
+ clk_put(host->cclk);
+
+err_free_hclk:
+ clk_disable(host->hclk);
+ clk_put(host->hclk);
err_freehost:
kfree(host);
@@ -1731,6 +2248,10 @@ static int __exit dw_mci_remove(struct platform_device *pdev)
mci_writel(host, RINTSTS, 0xFFFFFFFF);
mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */
+ if (host->pdata->cd_type == DW_MCI_CD_EXTERNAL) {
+ host->pdata->ext_cd_cleanup(&dw_mci_notify_change);
+ }
+
platform_set_drvdata(pdev, NULL);
for (i = 0; i < host->num_slots; i++) {
@@ -1744,7 +2265,8 @@ static int __exit dw_mci_remove(struct platform_device *pdev)
mci_writel(host, CLKSRC, 0);
free_irq(platform_get_irq(pdev, 0), host);
- dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+ dma_free_coherent(&pdev->dev, host->buf_size, host->sg_cpu,
+ host->sg_dma);
if (host->use_dma && host->dma_ops->exit)
host->dma_ops->exit(host);
@@ -1754,6 +2276,11 @@ static int __exit dw_mci_remove(struct platform_device *pdev)
regulator_put(host->vmmc);
}
+ clk_disable(host->cclk);
+ clk_put(host->cclk);
+ clk_disable(host->hclk);
+ clk_put(host->hclk);
+
iounmap(host->regs);
kfree(host);
@@ -1769,6 +2296,8 @@ static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
int i, ret;
struct dw_mci *host = platform_get_drvdata(pdev);
+ dw_mci_disable_card_detection(host);
+
for (i = 0; i < host->num_slots; i++) {
struct dw_mci_slot *slot = host->slot[i];
if (!slot)
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 23c662a..345e2d7 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -14,6 +14,8 @@
#ifndef _DW_MMC_H_
#define _DW_MMC_H_
+#define DW_MMC_240A 0x240a
+
#define SDMMC_CTRL 0x000
#define SDMMC_PWREN 0x004
#define SDMMC_CLKDIV 0x008
@@ -51,7 +53,15 @@
#define SDMMC_IDINTEN 0x090
#define SDMMC_DSCADDR 0x094
#define SDMMC_BUFADDR 0x098
-#define SDMMC_DATA 0x100
+#define SDMMC_CLKSEL 0x09c
+#define SDMMC_DATA(x) (x)
+
+/*
+ * Data offset is difference according to Version
+ * Lower than 2.40a : data register offest is 0x100
+ */
+#define DATA_OFFSET 0x100
+#define DATA_240A_OFFSET 0x200
/* shift bit field */
#define _SBF(f, v) ((v) << (f))
@@ -82,7 +92,7 @@
#define SDMMC_CTYPE_4BIT BIT(0)
#define SDMMC_CTYPE_1BIT 0
/* Interrupt status & mask register defines */
-#define SDMMC_INT_SDIO BIT(16)
+#define SDMMC_INT_SDIO(n) BIT(16 + (n))
#define SDMMC_INT_EBE BIT(15)
#define SDMMC_INT_ACD BIT(14)
#define SDMMC_INT_SBE BIT(13)
@@ -102,6 +112,7 @@
#define SDMMC_INT_ERROR 0xbfc2
/* Command register defines */
#define SDMMC_CMD_START BIT(31)
+#define SDMMC_USE_HOLD_REG BIT(29)
#define SDMMC_CMD_CCS_EXP BIT(23)
#define SDMMC_CMD_CEATA_RD BIT(22)
#define SDMMC_CMD_UPD_CLK BIT(21)
@@ -117,8 +128,7 @@
#define SDMMC_CMD_RESP_EXP BIT(6)
#define SDMMC_CMD_INDX(n) ((n) & 0x1F)
/* Status register defines */
-#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FF)
-#define SDMMC_FIFO_SZ 32
+#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FFF)
/* Internal DMAC interrupt defines */
#define SDMMC_IDMAC_INT_AI BIT(9)
#define SDMMC_IDMAC_INT_NI BIT(8)
@@ -131,6 +141,8 @@
#define SDMMC_IDMAC_ENABLE BIT(7)
#define SDMMC_IDMAC_FB BIT(1)
#define SDMMC_IDMAC_SWRESET BIT(0)
+/* Version ID register define */
+#define SDMMC_GET_VERID(x) ((x) & 0xFFFF)
/* Register access macros */
#define mci_readl(dev, reg) \
diff --git a/drivers/mmc/host/mshci-s3c-dma.c b/drivers/mmc/host/mshci-s3c-dma.c
new file mode 100644
index 0000000..d62f544
--- /dev/null
+++ b/drivers/mmc/host/mshci-s3c-dma.c
@@ -0,0 +1,220 @@
+/*
+* linux/drivers/mmc/host/mshci-s3c-dma.c
+* Mobile Storage Host Controller Interface driver
+*
+* Copyright (c) 2011 Samsung Electronics Co., Ltd.
+* http://www.samsung.com
+*
+
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or (at
+* your option) any later version.
+*
+*/
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/gfp.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+
+#include <asm/memory.h>
+#include <asm/highmem.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/sizes.h>
+
+#include <linux/mmc/host.h>
+
+#include "mshci.h"
+
+
+static void mshci_s3c_dma_cache_maint_page(struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ void (*op)(const void *, size_t, int), int flush_type, int enable)
+{
+ /*
+ * A single sg entry may refer to multiple physically contiguous
+ * pages. But we still need to process highmem pages individually.
+ * If highmem is not configured then the bulk of this loop gets
+ * optimized out.
+ */
+ size_t left = size;
+ do {
+ size_t len = left;
+ void *vaddr;
+
+ if (PageHighMem(page)) {
+ if (len + offset > PAGE_SIZE) {
+ if (offset >= PAGE_SIZE) {
+ page += offset / PAGE_SIZE;
+ offset %= PAGE_SIZE;
+ }
+ len = PAGE_SIZE - offset;
+ }
+ vaddr = kmap_high_get(page);
+ if (vaddr) {
+ vaddr += offset;
+ if (flush_type == 0 && enable)
+ op(vaddr, len, dir);
+ kunmap_high(page);
+ } else if (cache_is_vipt()) {
+ /* unmapped pages might still be cached */
+ vaddr = kmap_atomic(page);
+ op(vaddr + offset, len, dir);
+ kunmap_atomic(vaddr);
+ }
+ } else {
+ vaddr = page_address(page) + offset;
+ if (flush_type == 0 && enable)
+ op(vaddr, len, dir);
+ }
+ offset = 0;
+ page++;
+ left -= len;
+
+ } while (left);
+}
+
+
+void mshci_s3c_dma_page_cpu_to_dev(struct page *page, unsigned long off,
+ size_t size, enum dma_data_direction dir, int flush_type)
+{
+ unsigned long paddr;
+
+ if (dir != DMA_FROM_DEVICE) {
+ mshci_s3c_dma_cache_maint_page(page, off, size, dir,
+ dmac_map_area,
+ flush_type, 1);
+
+ paddr = page_to_phys(page) + off;
+ if (flush_type != 2) {
+ outer_clean_range(paddr, paddr + size);
+ }
+ /* FIXME: non-speculating: flush on bidirectional mappings? */
+ } else {
+ paddr = page_to_phys(page) + off;
+
+ /* if flush all L1 cache,
+ L2 cache dose not neet to be clean.
+ because, all buffer dose not have split space */
+ if (flush_type != 2) {
+ outer_clean_range(paddr, paddr + size);
+ outer_inv_range(paddr, paddr + size);
+ }
+ /* FIXME: non-speculating: flush on bidirectional mappings? */
+
+ mshci_s3c_dma_cache_maint_page(page, off, size, dir,
+ dmac_unmap_area,
+ flush_type, 1);
+ }
+}
+
+
+static inline dma_addr_t mshci_s3c_dma_map_page(struct device *dev,
+ struct page *page, unsigned long offset, size_t size,
+ enum dma_data_direction dir, int flush_type)
+{
+ BUG_ON(!valid_dma_direction(dir));
+
+ mshci_s3c_dma_page_cpu_to_dev(page, offset, size, dir, flush_type);
+
+ return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+}
+
+int mshci_s3c_dma_map_sg(struct mshci_host *host, struct device *dev,
+ struct scatterlist *sg, int nents, enum dma_data_direction dir,
+ int flush_type)
+{
+ struct scatterlist *s;
+ int i, j;
+
+ BUG_ON(!valid_dma_direction(dir));
+
+ if (flush_type == 2) {
+ spin_unlock_irqrestore(&host->lock, host->sl_flags);
+ flush_all_cpu_caches();
+ outer_flush_all();
+ spin_lock_irqsave(&host->lock, host->sl_flags);
+ } else if(flush_type == 1) {
+ spin_unlock_irqrestore(&host->lock, host->sl_flags);
+ flush_all_cpu_caches();
+ spin_lock_irqsave(&host->lock, host->sl_flags);
+ }
+
+ for_each_sg(sg, s, nents, i) {
+ s->dma_address = mshci_s3c_dma_map_page(dev, sg_page(s),
+ s->offset, s->length, dir, flush_type);
+ if (dma_mapping_error(dev, s->dma_address)) {
+ goto bad_mapping;
+ }
+ }
+
+ debug_dma_map_sg(dev, sg, nents, nents, dir);
+
+ /* in case of invaldating cache, invaldating L2 cache
+ must be done prior to invaldating L1 cache */
+#if 0
+ if (dir == DMA_FROM_DEVICE) {
+ if (flush_type == 1) {
+ spin_unlock_irqrestore(&host->lock, host->sl_flags);
+ flush_all_cpu_caches();
+ spin_lock_irqsave(&host->lock, host->sl_flags);
+ }
+ }
+#endif
+ return nents;
+
+ bad_mapping:
+ for_each_sg(sg, s, i, j)
+ dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+ return 0;
+}
+
+void mshci_s3c_dma_page_dev_to_cpu(struct page *page, unsigned long off,
+ size_t size, enum dma_data_direction dir, int flush_type)
+{
+
+ unsigned long paddr = page_to_phys(page) + off;
+
+ /* FIXME: non-speculating: not required */
+ /* don't bother invalidating if DMA to device */
+
+ mshci_s3c_dma_cache_maint_page(page, off, size, dir, dmac_unmap_area,
+ flush_type, 0);
+}
+
+
+static inline void mshci_s3c_dma_unmap_page(struct device *dev,
+ dma_addr_t handle, size_t size,
+ enum dma_data_direction dir, int flush_type)
+{
+ mshci_s3c_dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)), \
+ handle & ~PAGE_MASK, size, dir, flush_type);
+}
+
+
+void mshci_s3c_dma_unmap_sg(struct mshci_host *host,
+ struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir, int flush_type)
+{
+#if 1
+ struct scatterlist *s;
+ int i;
+
+ if (dir == DMA_TO_DEVICE)
+ for_each_sg(sg, s, nents, i)
+ mshci_s3c_dma_unmap_page(dev, sg_dma_address(s),
+ sg_dma_len(s),dir, flush_type);
+#endif
+}
+
+MODULE_DESCRIPTION("Samsung MSHCI (HSMMC) own dma map functions");
+MODULE_AUTHOR("Hyunsung Jang, <hs79.jang@samsung.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:s3c-mshci");
+
diff --git a/drivers/mmc/host/mshci-s3c.c b/drivers/mmc/host/mshci-s3c.c
new file mode 100644
index 0000000..323f115
--- /dev/null
+++ b/drivers/mmc/host/mshci-s3c.c
@@ -0,0 +1,631 @@
+/*
+* linux/drivers/mmc/host/mshci-s3c.c
+* Mobile Storage Host Controller Interface driver
+*
+* Copyright (c) 2011 Samsung Electronics Co., Ltd.
+* http://www.samsung.com
+*
+* Based on linux/drivers/mmc/host/sdhci-s3c.c
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or (at
+* your option) any later version.
+*
+*/
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+
+#include <linux/mmc/host.h>
+
+#include <plat/gpio-cfg.h>
+#include <plat/mshci.h>
+#include <plat/clock.h>
+#include <plat/cpu.h>
+
+#include "mshci.h"
+
+#ifdef CONFIG_MMC_MSHCI_S3C_DMA_MAP
+int mshci_s3c_dma_map_sg(struct mshci_host *host, struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, int flush_type);
+
+void mshci_s3c_dma_unmap_sg(struct mshci_host *host, struct device *dev,
+ struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, int flush_type);
+#endif
+
+#define MAX_BUS_CLK (1)
+
+/**
+ * struct mshci_s3c - S3C MSHCI instance
+ * @host: The MSHCI host created
+ * @pdev: The platform device we where created from.
+ * @ioarea: The resource created when we claimed the IO area.
+ * @pdata: The platform data for this controller.
+ * @cur_clk: The index of the current bus clock.
+ * @clk_io: The clock for the internal bus interface.
+ * @clk_bus: The clocks that are available for the SD/MMC bus clock.
+ */
+struct mshci_s3c {
+ struct mshci_host *host;
+ struct platform_device *pdev;
+ struct resource *ioarea;
+ struct s3c_mshci_platdata *pdata;
+ unsigned int cur_clk;
+ int ext_cd_irq;
+ int ext_cd_gpio;
+
+ struct clk *clk_io;
+ struct clk *clk_bus[MAX_BUS_CLK];
+};
+
+static inline struct mshci_s3c *to_s3c(struct mshci_host *host)
+{
+ return mshci_priv(host);
+}
+
+/**
+ * mshci_s3c_get_max_clk - callback to get maximum clock frequency.
+ * @host: The MSHCI host instance.
+ *
+ * Callback to return the maximum clock rate acheivable by the controller.
+*/
+static unsigned int mshci_s3c_get_max_clk(struct mshci_host *host)
+{
+ struct mshci_s3c *ourhost = to_s3c(host);
+ struct clk *busclk;
+ unsigned int rate, max;
+ int clk;
+
+ for (max = 0, clk = 0; clk < MAX_BUS_CLK; clk++) {
+ busclk = ourhost->clk_bus[clk];
+ if (!busclk)
+ continue;
+
+ rate = clk_get_rate(busclk);
+ /* It should be checked later ############# */
+ if (rate > max) {
+ if ((soc_is_exynos4412() || soc_is_exynos4212()) &&
+ (samsung_rev() >= EXYNOS4412_REV_1_0))
+ max = rate >> 2;
+ else
+ max = rate >> 1;
+ }
+ }
+
+ /* max clock can be change after changing clock source. */
+ host->mmc->f_max = max;
+ return max;
+}
+
+/**
+ * mshci_s3c_consider_clock - consider one the bus clocks for current setting
+ * @ourhost: Our MSHCI instance.
+ * @src: The source clock index.
+ * @wanted: The clock frequency wanted.
+ */
+static unsigned int mshci_s3c_consider_clock(struct mshci_s3c *ourhost,
+ unsigned int src,
+ unsigned int wanted)
+{
+ unsigned long rate;
+ struct clk *clksrc = ourhost->clk_bus[src];
+ int div;
+
+ if (!clksrc)
+ return UINT_MAX;
+
+ rate = clk_get_rate(clksrc);
+
+ for (div = 1; div < 256; div *= 2) {
+ if ((rate / div) <= wanted)
+ break;
+ }
+
+ dev_dbg(&ourhost->pdev->dev, "clk %d: rate %ld, want %d, got %ld\n",
+ src, rate, wanted, rate / div);
+
+ return wanted - (rate / div);
+}
+
+/**
+ * mshci_s3c_set_clock - callback on clock change
+ * @host: The MSHCI host being changed
+ * @clock: The clock rate being requested.
+ *
+ * When the card's clock is going to be changed, look at the new frequency
+ * and find the best clock source to go with it.
+*/
+static void mshci_s3c_set_clock(struct mshci_host *host, unsigned int clock)
+{
+ struct mshci_s3c *ourhost = to_s3c(host);
+ unsigned int best = UINT_MAX;
+ unsigned int delta;
+ int best_src = 0;
+ int src;
+
+ /* don't bother if the clock is going off. */
+ if (clock == 0)
+ return;
+
+ for (src = 0; src < MAX_BUS_CLK; src++) {
+ delta = mshci_s3c_consider_clock(ourhost, src, clock);
+ if (delta < best) {
+ best = delta;
+ best_src = src;
+ }
+ }
+
+ dev_dbg(&ourhost->pdev->dev,
+ "selected source %d, clock %d, delta %d\n",
+ best_src, clock, best);
+
+ /* select the new clock source */
+
+ if (ourhost->cur_clk != best_src) {
+ struct clk *clk = ourhost->clk_bus[best_src];
+
+ ourhost->cur_clk = best_src;
+ host->max_clk = clk_get_rate(clk);
+ }
+
+ /* reconfigure the hardware for new clock rate */
+
+ {
+ struct mmc_ios ios;
+
+ ios.clock = clock;
+
+ if (ourhost->pdata->cfg_card)
+ (ourhost->pdata->cfg_card)(ourhost->pdev, host->ioaddr,
+ &ios, NULL);
+ }
+}
+
+/**
+ * mshci_s3c_get_ro - callback for get_ro
+ * @host: The MSHCI host being changed
+ *
+ * If the WP pin is connected with GPIO, can get the value which indicates
+ * the card is locked or not.
+*/
+static int mshci_s3c_get_ro(struct mmc_host *mmc)
+{
+ struct mshci_s3c *ourhost = to_s3c(mmc_priv(mmc));
+
+ return gpio_get_value(ourhost->pdata->wp_gpio);
+}
+
+/**
+ * mshci_s3c_cfg_wp - configure GPIO for WP pin
+ * @gpio_num: GPIO number which connected with WP line from SD/MMC slot
+ *
+ * Configure GPIO for using WP line
+ */
+static void mshci_s3c_cfg_wp(unsigned int gpio_num)
+{
+ s3c_gpio_cfgpin(gpio_num, S3C_GPIO_INPUT);
+ s3c_gpio_setpull(gpio_num, S3C_GPIO_PULL_UP);
+}
+
+static void mshci_s3c_set_ios(struct mshci_host *host,
+ struct mmc_ios *ios)
+{
+ struct mshci_s3c *ourhost = to_s3c(host);
+ struct s3c_mshci_platdata *pdata = ourhost->pdata;
+ int width;
+
+ if (ios->power_mode != MMC_POWER_OFF) {
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_8:
+ width = 8;
+ break;
+ case MMC_BUS_WIDTH_4:
+ width = 4;
+ break;
+ case MMC_BUS_WIDTH_1:
+ width = 1;
+ break;
+ default:
+ BUG();
+ }
+
+ if (pdata->cfg_gpio)
+ pdata->cfg_gpio(ourhost->pdev, width);
+ }
+
+ if (pdata->cfg_card)
+ pdata->cfg_card(ourhost->pdev, host->ioaddr,
+ ios, host->mmc->card);
+
+ if (pdata->cfg_ddr) {
+ if (ios->timing == MMC_TIMING_UHS_DDR50)
+ pdata->cfg_ddr(ourhost->pdev, 1);
+ else
+ pdata->cfg_ddr(ourhost->pdev, 0);
+ }
+ /* after change DDR/SDR, max_clk has been changed.
+ You should re-calc the max_clk */
+ host->max_clk = mshci_s3c_get_max_clk(host);
+}
+
+/**
+ * mshci_s3c_init_card - Reset eMMC device
+ *
+ * init eMMC_card.
+ */
+
+static void mshci_s3c_init_card(struct mshci_host *host)
+{
+ struct mshci_s3c *ourhost = to_s3c(host);
+ struct s3c_mshci_platdata *pdata = ourhost->pdata;
+
+ if (pdata->init_card)
+ pdata->init_card(ourhost->pdev);
+}
+
+static int mshci_s3c_get_fifo_depth(struct mshci_host *host)
+{
+ struct mshci_s3c *ourhost = to_s3c(host);
+ struct s3c_mshci_platdata *pdata = ourhost->pdata;
+
+ return pdata->fifo_depth;
+}
+
+
+static struct mshci_ops mshci_s3c_ops = {
+ .get_max_clock = mshci_s3c_get_max_clk,
+ .set_clock = mshci_s3c_set_clock,
+ .set_ios = mshci_s3c_set_ios,
+ .init_card = mshci_s3c_init_card,
+#ifdef CONFIG_MMC_MSHCI_S3C_DMA_MAP
+ .dma_map_sg = mshci_s3c_dma_map_sg,
+ .dma_unmap_sg = mshci_s3c_dma_unmap_sg,
+#endif
+ .get_fifo_depth = mshci_s3c_get_fifo_depth,
+};
+
+static void mshci_s3c_notify_change(struct platform_device *dev, int state)
+{
+ struct mshci_host *host;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ host = platform_get_drvdata(dev);
+ if (host) {
+ if (state) {
+ dev_dbg(&dev->dev, "card inserted.\n");
+ host->flags &= ~MSHCI_DEVICE_DEAD;
+ tasklet_schedule(&host->card_tasklet);
+ } else {
+ dev_dbg(&dev->dev, "card removed.\n");
+ host->flags |= MSHCI_DEVICE_DEAD;
+ tasklet_schedule(&host->card_tasklet);
+ }
+ }
+ local_irq_restore(flags);
+}
+
+static irqreturn_t mshci_s3c_gpio_card_detect_isr(int irq, void *dev_id)
+{
+ struct mshci_s3c *sc = dev_id;
+ int status = gpio_get_value(sc->ext_cd_gpio);
+ if (sc->pdata->ext_cd_gpio_invert)
+ status = !status;
+ mshci_s3c_notify_change(sc->pdev, status);
+ return IRQ_HANDLED;
+}
+
+
+/*
+ * mshci_s3c_probe - set up one MSHCI controller instance
+ *
+ * Acquires clocks, register space, configures GPIOs/quirks from platform
+ * data, registers the host with the mshci core and (optionally) wires up
+ * an external card-detect GPIO interrupt.
+ *
+ * Fixes vs. the previous revision:
+ *  - the return value of mshci_add_host() is now checked *before* the
+ *    card-detect GPIO setup runs, so a failed add_host no longer grabs
+ *    a GPIO/IRQ that is never released;
+ *  - the error path no longer calls clk_disable()/clk_put() on clk_bus[]
+ *    slots that were never filled in;
+ *  - a failed ioremap now releases the previously requested mem region;
+ *  - err_add_host now unmaps the registers.
+ */
+static int __devinit mshci_s3c_probe(struct platform_device *pdev)
+{
+	struct s3c_mshci_platdata *pdata = pdev->dev.platform_data;
+	struct device *dev = &pdev->dev;
+	struct mshci_host *host;
+	struct mshci_s3c *sc;
+	struct resource *res;
+	int ret, irq, ptr, clks;
+
+	if (!pdata) {
+		dev_err(dev, "no device data specified\n");
+		return -ENOENT;
+	}
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0) {
+		dev_err(dev, "no irq specified\n");
+		return irq;
+	}
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "no memory specified\n");
+		return -ENOENT;
+	}
+	host = mshci_alloc_host(dev, sizeof(struct mshci_s3c));
+	if (IS_ERR(host)) {
+		dev_err(dev, "mshci_alloc_host() failed\n");
+		return PTR_ERR(host);
+	}
+	sc = mshci_priv(host);
+
+	/* EXYNOS4210 has no HOLD register stage; later SoCs do. */
+	if (soc_is_exynos4210()) {
+		host->data_addr = 0x0;
+		host->hold_bit = 0;
+	} else {
+		host->data_addr = 0x100;
+		host->hold_bit = CMD_USE_HOLD_REG;
+	}
+
+	sc->host = host;
+	sc->pdev = pdev;
+	sc->pdata = pdata;
+	sc->ext_cd_gpio = -1;
+
+	platform_set_drvdata(pdev, host);
+
+	sc->clk_io = clk_get(dev, "dwmci");
+	if (IS_ERR(sc->clk_io)) {
+		dev_err(dev, "failed to get io clock\n");
+		ret = PTR_ERR(sc->clk_io);
+		goto err_io_clk;
+	}
+
+	/* enable the local io clock and keep it running for the moment. */
+	clk_enable(sc->clk_io);
+
+	for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
+		struct clk *clk;
+		char *name = pdata->clocks[ptr];
+
+		if (name == NULL)
+			continue;
+		clk = clk_get(dev, name);
+		if (IS_ERR(clk)) {
+			dev_err(dev, "failed to get clock %s\n", name);
+			continue;
+		}
+
+#if defined(CONFIG_EXYNOS4_MSHC_VPLL_46MHZ) || \
+	defined(CONFIG_EXYNOS4_MSHC_EPLL_45MHZ)
+		/* Walk up the clock tree from sclk_dwmci until the EPLL/VPLL
+		 * root is found, then pin its rate and enable it. */
+		if (!strcmp("sclk_dwmci", name)) {
+			struct clk *parent_clk;
+
+			parent_clk = clk_get_parent(clk);
+
+			if (!parent_clk) {
+				dev_err(dev, "failed to get parent clock %s\n"
+						, (char *)(clk->name));
+			} else {
+				for ( ; ; ) {
+					parent_clk = clk_get_parent(parent_clk);
+					if (parent_clk) {
+#ifdef CONFIG_EXYNOS4_MSHC_EPLL_45MHZ
+						if (!strcmp("fout_epll", \
+							parent_clk->name)) {
+							clk_set_rate \
+							(parent_clk, 180633600);
+							pdata->cfg_ddr(pdev, 0);
+#elif defined(CONFIG_EXYNOS4_MSHC_VPLL_46MHZ)
+						if (!strcmp("fout_vpll", \
+							parent_clk->name)) {
+							clk_set_rate \
+							(parent_clk, 370882812);
+							pdata->cfg_ddr(pdev, 0);
+#endif
+							clk_enable(parent_clk);
+							break;
+						} else
+							continue;
+					} else {
+						dev_err(dev, "failed to"
+								"get parent"
+								"clock %s\n"
+								, clk->name);
+						break;
+					}
+				}
+			}
+		}
+#endif
+		clks++;
+		sc->clk_bus[ptr] = clk;
+		clk_enable(clk);
+
+		dev_info(dev, "clock source %d: %s (%ld Hz)\n",
+				ptr, name, clk_get_rate(clk));
+	}
+
+	if (clks == 0) {
+		dev_err(dev, "failed to find any bus clocks\n");
+		ret = -ENOENT;
+		goto err_no_busclks;
+	}
+
+	sc->ioarea = request_mem_region(res->start, resource_size(res),
+			mmc_hostname(host->mmc));
+	if (!sc->ioarea) {
+		dev_err(dev, "failed to reserve register area\n");
+		ret = -ENXIO;
+		goto err_req_regs;
+	}
+
+	host->ioaddr = ioremap_nocache(res->start, resource_size(res));
+	if (!host->ioaddr) {
+		dev_err(dev, "failed to map registers\n");
+		ret = -ENXIO;
+		goto err_ioremap;
+	}
+
+	/* Ensure we have minimal gpio selected CMD/CLK/Detect */
+	if (pdata->cfg_gpio)
+		pdata->cfg_gpio(pdev, pdata->max_width);
+	else
+		dev_err(dev, "cfg_gpio does not exist!\n");
+
+	host->hw_name = "samsung-mshci";
+	host->ops = &mshci_s3c_ops;
+	host->quirks = 0;
+	host->irq = irq;
+
+	host->mmc->caps = pdata->host_caps ? pdata->host_caps : 0;
+	host->mmc->caps2 = pdata->host_caps2 ? pdata->host_caps2 : 0;
+
+	if (pdata->cd_type == S3C_MSHCI_CD_PERMANENT) {
+		host->quirks |= MSHCI_QUIRK_BROKEN_PRESENT_BIT;
+		host->mmc->caps |= MMC_CAP_NONREMOVABLE;
+	}
+
+	/* If the SD controller's WP pin isn't connected to the SD card and
+	 * a GPIO is allocated for reading WP state from the card, use this
+	 * quirk and pass the GPIO number in pdata->wp_gpio. */
+	if (pdata->has_wp_gpio && gpio_is_valid(pdata->wp_gpio)) {
+		mshci_s3c_ops.get_ro = mshci_s3c_get_ro;
+		host->quirks |= MSHCI_QUIRK_NO_WP_BIT;
+		mshci_s3c_cfg_wp(pdata->wp_gpio);
+	}
+
+	ret = mshci_add_host(host);
+	if (ret) {
+		dev_err(dev, "mshci_add_host() failed\n");
+		goto err_add_host;
+	}
+
+	if (pdata->cd_type == S3C_MSHCI_CD_GPIO &&
+		gpio_is_valid(pdata->ext_cd_gpio)) {
+
+		ret = gpio_request(pdata->ext_cd_gpio, "MSHCI EXT CD");
+		if (ret) {
+			/* NOTE(review): the host is already registered at
+			 * this point but is not removed on this path —
+			 * preexisting limitation, kept for now. */
+			dev_err(&pdev->dev, "cannot request gpio for card detect\n");
+			goto err_add_host;
+		}
+
+		sc->ext_cd_gpio = pdata->ext_cd_gpio;
+
+		sc->ext_cd_irq = gpio_to_irq(pdata->ext_cd_gpio);
+		if (sc->ext_cd_irq &&
+			request_irq(sc->ext_cd_irq,
+					mshci_s3c_gpio_card_detect_isr,
+					IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+					dev_name(&pdev->dev), sc)) {
+			dev_err(&pdev->dev, "cannot request irq for card detect\n");
+			sc->ext_cd_irq = 0;
+		}
+		dev_dbg(&pdev->dev, "mshci detects a card insertion/removal"
+				"by EINT\n");
+	}
+
+	device_enable_async_suspend(dev);
+
+	return 0;
+
+ err_add_host:
+	iounmap(host->ioaddr);
+
+ err_ioremap:
+	release_resource(sc->ioarea);
+	kfree(sc->ioarea);
+
+ err_req_regs:
+	for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
+		/* slots may be empty: clocks that were missing or failed */
+		if (!sc->clk_bus[ptr])
+			continue;
+		clk_disable(sc->clk_bus[ptr]);
+		clk_put(sc->clk_bus[ptr]);
+	}
+
+ err_no_busclks:
+	clk_disable(sc->clk_io);
+	clk_put(sc->clk_io);
+
+ err_io_clk:
+	mshci_free_host(host);
+	return ret;
+}
+
+/* NOTE(review): stub — nothing acquired in probe is released on unbind
+ * (ext CD irq/gpio, mapped registers, mem region, clocks, the host
+ * itself).  Confirm the driver is never unbound, or implement the
+ * inverse of probe here. */
+static int __devexit mshci_s3c_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+#ifdef CONFIG_PM
+
+/*
+ * mshci_s3c_suspend - suspend the host and cut card power
+ *
+ * Fix: the return value of mshci_suspend_host() was previously ignored,
+ * so the card was powered down even when the core failed to suspend.
+ * Errors are now propagated and power is only cut on success.
+ */
+static int mshci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
+{
+	struct mshci_host *host = platform_get_drvdata(dev);
+	struct s3c_mshci_platdata *pdata = dev->dev.platform_data;
+	int ret;
+
+	ret = mshci_suspend_host(host, pm);
+	if (ret)
+		return ret;
+
+	if (pdata->set_power)
+		pdata->set_power(dev, 0);
+
+	return 0;
+}
+
+/*
+ * mshci_s3c_resume - restore card power and resume the host
+ *
+ * Fix: the return value of mshci_resume_host() was previously discarded;
+ * it is now propagated so the PM core can see resume failures.
+ */
+static int mshci_s3c_resume(struct platform_device *dev)
+{
+	struct mshci_host *host = platform_get_drvdata(dev);
+	struct s3c_mshci_platdata *pdata = dev->dev.platform_data;
+
+	if (pdata->set_power)
+		pdata->set_power(dev, 1);
+
+	return mshci_resume_host(host);
+}
+
+/* NOTE(review): this function is not referenced by mshci_s3c_driver below
+ * (no .shutdown hook is set), and its pm_message_t parameter does not match
+ * the platform_driver .shutdown callback signature — confirm whether it is
+ * dead code or should be wired up (with the correct prototype). */
+static void mshci_s3c_shutdown(struct platform_device *dev, pm_message_t pm)
+{
+	struct mshci_host *host = platform_get_drvdata(dev);
+	struct s3c_mshci_platdata *pdata = dev->dev.platform_data;
+
+	mshci_suspend_host(host, pm);
+
+	if (pdata->shutdown)
+		pdata->shutdown();
+}
+
+
+#else
+#define mshci_s3c_suspend NULL
+#define mshci_s3c_resume NULL
+#endif
+
+/* Platform driver; binds by name "dw_mmc".  Note mshci_s3c_remove is a
+ * stub and no .shutdown hook is registered. */
+static struct platform_driver mshci_s3c_driver = {
+	.probe		= mshci_s3c_probe,
+	.remove		= __devexit_p(mshci_s3c_remove),
+	.suspend	= mshci_s3c_suspend,
+	.resume		= mshci_s3c_resume,
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= "dw_mmc",
+	},
+};
+
+/* Module plumbing: register/unregister the platform driver. */
+static int __init mshci_s3c_init(void)
+{
+	return platform_driver_register(&mshci_s3c_driver);
+}
+
+static void __exit mshci_s3c_exit(void)
+{
+	platform_driver_unregister(&mshci_s3c_driver);
+}
+
+module_init(mshci_s3c_init);
+module_exit(mshci_s3c_exit);
+
+MODULE_DESCRIPTION("Samsung MSHCI (HSMMC) glue");
+MODULE_AUTHOR("Hyunsung Jang, <hs79.jang@samsung.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dw_mmc");
diff --git a/drivers/mmc/host/mshci.c b/drivers/mmc/host/mshci.c
new file mode 100644
index 0000000..73de297
--- /dev/null
+++ b/drivers/mmc/host/mshci.c
@@ -0,0 +1,2248 @@
+/*
+* linux/drivers/mmc/host/mshci.c
+* Mobile Storage Host Controller Interface driver
+*
+* Copyright (c) 2011 Samsung Electronics Co., Ltd.
+* http://www.samsung.com
+*
+* Based on linux/drivers/mmc/host/sdhci.c
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or (at
+* your option) any later version.
+*
+*/
+
+#include <linux/delay.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+
+#include <linux/leds.h>
+
+#include <linux/mmc/host.h>
+
+#include <plat/cpu.h>
+
+#include "mshci.h"
+
+#define DRIVER_NAME "mshci"
+
+#define DBG(f, x...) \
+	pr_debug(DRIVER_NAME " [%s()]: " f, __func__, ## x)
+
+/* arguments for mshci_clock_onoff() */
+#define SDHC_CLK_ON 1
+#define SDHC_CLK_OFF 0
+
+static unsigned int debug_quirks;
+
+/* forward declarations for the request/data state machine below */
+static void mshci_prepare_data(struct mshci_host *, struct mmc_data *);
+static void mshci_finish_data(struct mshci_host *);
+
+static void mshci_send_command(struct mshci_host *, struct mmc_command *);
+static void mshci_finish_command(struct mshci_host *);
+static void mshci_fifo_init(struct mshci_host *host);
+
+static void mshci_set_clock(struct mshci_host *host,
+		unsigned int clock, u32 bus_width);
+
+/* IDMAC descriptor list sizing: 4MiB total in 4KiB chunks */
+#define MSHCI_MAX_DMA_SINGLE_TRANS_SIZE (0x1000)
+#define MSHCI_MAX_DMA_TRANS_SIZE (0x400000)
+#define MSHCI_MAX_DMA_LIST (MSHCI_MAX_DMA_TRANS_SIZE / \
+		MSHCI_MAX_DMA_SINGLE_TRANS_SIZE)
+
+/* Dump every MSHCI register at KERN_DEBUG for post-mortem diagnosis.
+ * Called from the timeout/reset error paths.  Output formatting is
+ * deliberately aligned; do not reflow the strings. */
+static void mshci_dumpregs(struct mshci_host *host)
+{
+	printk(KERN_DEBUG DRIVER_NAME ": ============== REGISTER DUMP ==============\n");
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_CTRL:      0x%08x\n",
+		mshci_readl(host, MSHCI_CTRL));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_PWREN:     0x%08x\n",
+		mshci_readl(host, MSHCI_PWREN));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_CLKDIV:    0x%08x\n",
+		mshci_readl(host, MSHCI_CLKDIV));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_CLKSRC:    0x%08x\n",
+		mshci_readl(host, MSHCI_CLKSRC));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_CLKENA:    0x%08x\n",
+		mshci_readl(host, MSHCI_CLKENA));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_TMOUT:     0x%08x\n",
+		mshci_readl(host, MSHCI_TMOUT));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_CTYPE:     0x%08x\n",
+		mshci_readl(host, MSHCI_CTYPE));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_BLKSIZ:    0x%08x\n",
+		mshci_readl(host, MSHCI_BLKSIZ));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_BYTCNT:    0x%08x\n",
+		mshci_readl(host, MSHCI_BYTCNT));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_INTMSK:    0x%08x\n",
+		mshci_readl(host, MSHCI_INTMSK));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_CMDARG:    0x%08x\n",
+		mshci_readl(host, MSHCI_CMDARG));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_CMD:       0x%08x\n",
+		mshci_readl(host, MSHCI_CMD));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_MINTSTS:   0x%08x\n",
+		mshci_readl(host, MSHCI_MINTSTS));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_RINTSTS:   0x%08x\n",
+		mshci_readl(host, MSHCI_RINTSTS));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_STATUS:    0x%08x\n",
+		mshci_readl(host, MSHCI_STATUS));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_FIFOTH:    0x%08x\n",
+		mshci_readl(host, MSHCI_FIFOTH));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_CDETECT:   0x%08x\n",
+		mshci_readl(host, MSHCI_CDETECT));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_WRTPRT:    0x%08x\n",
+		mshci_readl(host, MSHCI_WRTPRT));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_GPIO:      0x%08x\n",
+		mshci_readl(host, MSHCI_GPIO));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_TCBCNT:    0x%08x\n",
+		mshci_readl(host, MSHCI_TCBCNT));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_TBBCNT:    0x%08x\n",
+		mshci_readl(host, MSHCI_TBBCNT));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_DEBNCE:    0x%08x\n",
+		mshci_readl(host, MSHCI_DEBNCE));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_USRID:     0x%08x\n",
+		mshci_readl(host, MSHCI_USRID));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_VERID:     0x%08x\n",
+		mshci_readl(host, MSHCI_VERID));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_HCON:      0x%08x\n",
+		mshci_readl(host, MSHCI_HCON));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_UHS_REG:   0x%08x\n",
+		mshci_readl(host, MSHCI_UHS_REG));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_BMOD:      0x%08x\n",
+		mshci_readl(host, MSHCI_BMOD));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_PLDMND:    0x%08x\n",
+		mshci_readl(host, MSHCI_PLDMND));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_DBADDR:    0x%08x\n",
+		mshci_readl(host, MSHCI_DBADDR));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_IDSTS:     0x%08x\n",
+		mshci_readl(host, MSHCI_IDSTS));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_IDINTEN:   0x%08x\n",
+		mshci_readl(host, MSHCI_IDINTEN));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_DSCADDR:   0x%08x\n",
+		mshci_readl(host, MSHCI_DSCADDR));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_BUFADDR:   0x%08x\n",
+		mshci_readl(host, MSHCI_BUFADDR));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_WAKEUPCON: 0x%08x\n",
+		mshci_readl(host, MSHCI_WAKEUPCON));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_CLOCKCON:  0x%08x\n",
+		mshci_readl(host, MSHCI_CLOCKCON));
+	printk(KERN_DEBUG DRIVER_NAME ": MSHCI_FIFODAT:   0x%08x\n",
+		mshci_readl(host, MSHCI_FIFODAT + host->data_addr));
+	printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n");
+}
+
+
+/*****************************************************************************\
+ * *
+ * Low level functions *
+ * *
+\*****************************************************************************/
+
+/* Read-modify-write MSHCI_INTMSK: clear the @clear bits, set @set. */
+static void mshci_clear_set_irqs(struct mshci_host *host, u32 clear, u32 set)
+{
+	u32 mask = mshci_readl(host, MSHCI_INTMSK);
+
+	mask = (mask & ~clear) | set;
+	mshci_writel(host, mask, MSHCI_INTMSK);
+}
+
+/* Unmask (enable) the given interrupt bits. */
+static void mshci_unmask_irqs(struct mshci_host *host, u32 irqs)
+{
+	mshci_clear_set_irqs(host, 0, irqs);
+}
+
+/* Mask (disable) the given interrupt bits. */
+static void mshci_mask_irqs(struct mshci_host *host, u32 irqs)
+{
+	mshci_clear_set_irqs(host, irqs, 0);
+}
+
+/* Enable or disable the card-detect interrupt.  Boards without a CD pin
+ * (broken-CD or broken-present-bit quirks) must keep it masked: enabling
+ * CD_DETECT without a pin can cause spurious interrupts. */
+static void mshci_set_card_detection(struct mshci_host *host, bool enable)
+{
+	u32 irqs = INTMSK_CDETECT;
+
+	if (enable && !(host->quirks & (MSHCI_QUIRK_BROKEN_CARD_DETECTION |
+					MSHCI_QUIRK_BROKEN_PRESENT_BIT)))
+		mshci_unmask_irqs(host, irqs);
+	else
+		mshci_mask_irqs(host, irqs);
+}
+
+/* Convenience wrapper: unmask card-detect (subject to quirks). */
+static void mshci_enable_card_detection(struct mshci_host *host)
+{
+	mshci_set_card_detection(host, true);
+}
+
+/* Convenience wrapper: mask card-detect. */
+static void mshci_disable_card_detection(struct mshci_host *host)
+{
+	mshci_set_card_detection(host, false);
+}
+
+/* Assert the CIU (controller) reset bit and poll for self-clear,
+ * waiting at most ~100ms before dumping registers and giving up. */
+static void mshci_reset_ciu(struct mshci_host *host)
+{
+	int retries = 100;
+
+	mshci_writel(host, mshci_readl(host, MSHCI_CTRL) | CTRL_RESET,
+		MSHCI_CTRL);
+
+	while (mshci_readl(host, MSHCI_CTRL) & CTRL_RESET) {
+		if (retries-- == 0) {
+			printk(KERN_ERR "%s: Reset CTRL never completed.\n",
+				mmc_hostname(host->mmc));
+			mshci_dumpregs(host);
+			return;
+		}
+		mdelay(1);
+	}
+}
+
+/* Assert the FIFO reset bit and poll for self-clear (~100ms max). */
+static void mshci_reset_fifo(struct mshci_host *host)
+{
+	int retries = 100;
+
+	mshci_writel(host, mshci_readl(host, MSHCI_CTRL) | FIFO_RESET,
+		MSHCI_CTRL);
+
+	while (mshci_readl(host, MSHCI_CTRL) & FIFO_RESET) {
+		if (retries-- == 0) {
+			printk(KERN_ERR "%s: Reset FIFO never completed.\n",
+				mmc_hostname(host->mmc));
+			mshci_dumpregs(host);
+			return;
+		}
+		mdelay(1);
+	}
+}
+
+/* Assert the internal DMA reset bit and poll for self-clear (~100ms max). */
+static void mshci_reset_dma(struct mshci_host *host)
+{
+	int retries = 100;
+
+	mshci_writel(host, mshci_readl(host, MSHCI_CTRL) | DMA_RESET,
+		MSHCI_CTRL);
+
+	while (mshci_readl(host, MSHCI_CTRL) & DMA_RESET) {
+		if (retries-- == 0) {
+			printk(KERN_ERR "%s: Reset DMA never completed.\n",
+				mmc_hostname(host->mmc));
+			mshci_dumpregs(host);
+			return;
+		}
+		mdelay(1);
+	}
+}
+
+/*
+ * mshci_reset_all - full controller reset (CIU, FIFO, DMA)
+ *
+ * Waits for the card to release the DATA0 line before resetting the
+ * CIU; resetting while the card is busy can wedge it.  If DATA0 never
+ * goes idle the board init_card() hook (when present) is used to
+ * hard-reset the eMMC first.
+ */
+static void mshci_reset_all(struct mshci_host *host)
+{
+	int count, err = 0;
+
+	/* Wait max 100 ms */
+	count = 10000;
+
+	/* before reset ciu, it should check DATA0. if when DATA0 is low and
+	   it resets ciu, it might make a problem */
+	do {
+		/* bit 9 of MSHCI_STATUS is sampled three times, 100us
+		 * apart, before the bus is treated as idle — presumably
+		 * the data-busy flag; TODO confirm against the IP manual */
+		if (!(mshci_readl(host, MSHCI_STATUS) & (1<<9))) {
+			udelay(100);
+			if (!(mshci_readl(host, MSHCI_STATUS) & (1<<9))) {
+				udelay(100);
+				if (!(mshci_readl(host, MSHCI_STATUS) & (1<<9)))
+					break;
+			}
+		}
+		if (count == 0) {
+			printk(KERN_ERR "%s: Controller never released "
+				"data0 before reset ciu.\n",
+				mmc_hostname(host->mmc));
+			mshci_dumpregs(host);
+			err = 1;
+			break;
+		}
+		count--;
+		udelay(10);
+	} while (1);
+
+	if (err && host->ops->init_card) {
+		printk(KERN_ERR "%s: eMMC's data lines get low.\n"
+			"Reset eMMC.\n", mmc_hostname(host->mmc));
+		host->ops->init_card(host);
+	}
+
+	/* reset the three sub-units in order, with a short settle time */
+	mshci_reset_ciu(host);
+	udelay(1);
+	mshci_reset_fifo(host);
+	udelay(1);
+	mshci_reset_dma(host);
+	udelay(1);
+}
+
+/* Bring the controller to a known state: full reset, clear pending
+ * interrupt status, then mask everything except the events we handle. */
+static void mshci_init(struct mshci_host *host)
+{
+	u32 irqs = INTMSK_CDETECT | INTMSK_RE |
+		INTMSK_CDONE | INTMSK_DTO | INTMSK_TXDR | INTMSK_RXDR |
+		INTMSK_RCRC | INTMSK_DCRC | INTMSK_RTO | INTMSK_DRTO |
+		INTMSK_HTO | INTMSK_FRUN | INTMSK_HLE | INTMSK_SBE |
+		INTMSK_EBE;
+
+	mshci_reset_all(host);
+
+	/* clear interrupt status */
+	mshci_writel(host, INTMSK_ALL, MSHCI_RINTSTS);
+
+	mshci_clear_set_irqs(host, INTMSK_ALL, irqs);
+}
+
+/* Re-initialise the controller and re-arm card detection. */
+static void mshci_reinit(struct mshci_host *host)
+{
+	mshci_init(host);
+	mshci_enable_card_detection(host);
+}
+
+/*****************************************************************************\
+ * *
+ * Core functions *
+ * *
+\*****************************************************************************/
+
+/*
+ * mshci_read_block_pio - drain the RX FIFO into the request's sg list
+ *
+ * Reads as many bytes as the FIFO currently holds (FIFO_COUNT field of
+ * MSHCI_STATUS), unpacking each 32-bit FIFO word little-endian into the
+ * sg buffers.  Runs with local interrupts off because it iterates the
+ * host's sg_miter (kmap-based).
+ */
+static void mshci_read_block_pio(struct mshci_host *host)
+{
+	unsigned long flags;
+	size_t fifo_cnt, len, chunk;
+	u32 uninitialized_var(scratch);
+	u8 *buf;
+
+	DBG("PIO reading\n");
+
+	/* FIFO fill level in words, converted to bytes */
+	fifo_cnt = (mshci_readl(host, MSHCI_STATUS)&FIFO_COUNT)>>17;
+	fifo_cnt *= FIFO_WIDTH;
+	chunk = 0;
+
+	local_irq_save(flags);
+
+	while (fifo_cnt) {
+		if (!sg_miter_next(&host->sg_miter))
+			BUG();
+
+		len = min(host->sg_miter.length, fifo_cnt);
+
+		fifo_cnt -= len;
+		host->sg_miter.consumed = len;
+
+		buf = host->sg_miter.addr;
+
+		while (len) {
+			/* refill the 4-byte scratch word from the FIFO */
+			if (chunk == 0) {
+				scratch = mshci_readl(host,
+					MSHCI_FIFODAT + host->data_addr);
+				chunk = 4;
+			}
+
+			*buf = scratch & 0xFF;
+
+			buf++;
+			scratch >>= 8;
+			chunk--;
+			len--;
+		}
+	}
+
+	sg_miter_stop(&host->sg_miter);
+
+	local_irq_restore(flags);
+}
+
+/*
+ * mshci_write_block_pio - feed the TX FIFO from the request's sg list
+ *
+ * Writes up to 8 FIFO words (8 * FIFO_WIDTH bytes) per invocation,
+ * packing bytes little-endian into 32-bit words.  host->data_transfered
+ * tracks total bytes written so a trailing TXDR interrupt after the
+ * last byte can be distinguished from a real sg underrun.
+ */
+static void mshci_write_block_pio(struct mshci_host *host)
+{
+	unsigned long flags;
+	size_t fifo_cnt, len, chunk;
+	u32 scratch;
+	u8 *buf;
+
+	DBG("PIO writing\n");
+
+	fifo_cnt = 8;
+
+	fifo_cnt *= FIFO_WIDTH;
+	chunk = 0;
+	scratch = 0;
+
+	local_irq_save(flags);
+
+	while (fifo_cnt) {
+		if (!sg_miter_next(&host->sg_miter)) {
+
+			/* Even though transfer is complete,
+			 * TXDR interrupt occurs again.
+			 * So, it has to check that it has really
+			 * no next sg buffer or just DTO interrupt
+			 * has not occurred yet.
+			 */
+
+			if ((host->data->blocks * host->data->blksz) ==
+					host->data_transfered)
+				break; /* transfer done but DTO not yet */
+			BUG();
+		}
+		len = min(host->sg_miter.length, fifo_cnt);
+
+		fifo_cnt -= len;
+		host->sg_miter.consumed = len;
+		host->data_transfered += len;
+
+		buf = (host->sg_miter.addr);
+
+		while (len) {
+			scratch |= (u32)*buf << (chunk * 8);
+
+			buf++;
+			chunk++;
+			len--;
+
+			/* flush a full word, or the final partial word */
+			if ((chunk == 4) || ((len == 0) && (fifo_cnt == 0))) {
+				mshci_writel(host, scratch,
+					MSHCI_FIFODAT + host->data_addr);
+				chunk = 0;
+				scratch = 0;
+			}
+		}
+	}
+
+	sg_miter_stop(&host->sg_miter);
+
+	local_irq_restore(flags);
+}
+
+/* Dispatch one PIO FIFO service pass in the proper direction. */
+static void mshci_transfer_pio(struct mshci_host *host)
+{
+	void (*xfer)(struct mshci_host *);
+
+	BUG_ON(!host->data);
+
+	if (host->blocks == 0)
+		return;
+
+	xfer = (host->data->flags & MMC_DATA_READ) ?
+		mshci_read_block_pio : mshci_write_block_pio;
+	xfer(host);
+
+	DBG("PIO transfer complete.\n");
+}
+
+/* Fill one IDMAC descriptor and chain des3 to the physical address of
+ * the next descriptor in the list. */
+static void mshci_set_mdma_desc(u8 *desc_vir, u8 *desc_phy,
+					u32 des0, u32 des1, u32 des2)
+{
+	struct mshci_idmac *desc = (struct mshci_idmac *)desc_vir;
+
+	desc->des0 = des0;
+	desc->des1 = des1;
+	desc->des2 = des2;
+	desc->des3 = (u32)desc_phy + sizeof(struct mshci_idmac);
+}
+
+/*
+ * mshci_mdma_table_pre - map the sg list and build the IDMAC chain
+ *
+ * Maps data->sg (via the host's size-tiered dma_map_sg hook when
+ * available), writes one IDMAC descriptor per mapped segment, marks the
+ * last descriptor with MSHCI_IDMAC_LD, and maps the descriptor table
+ * itself for the device.  Returns 0 on success, -EINVAL on any mapping
+ * failure.
+ *
+ * NOTE(review): host->idma_desc is dma_map_single()'d twice without an
+ * intervening unmap ("to resync vir data to phy data") — on
+ * non-coherent platforms this looks like it leaks/duplicates a mapping;
+ * confirm against the streaming DMA API rules.
+ */
+static int mshci_mdma_table_pre(struct mshci_host *host,
+	struct mmc_data *data)
+{
+	int direction;
+
+	u8 *desc_vir, *desc_phy;
+	dma_addr_t addr;
+	int len;
+
+	struct scatterlist *sg;
+	int i;
+	u32 des_flag;
+	u32 size_idmac = sizeof(struct mshci_idmac);
+
+	if (data->flags & MMC_DATA_READ)
+		direction = DMA_FROM_DEVICE;
+	else
+		direction = DMA_TO_DEVICE;
+
+	/* host_cookie != 0 means the request was pre-mapped (async prep) */
+	if (!data->host_cookie) {
+		if (host->ops->dma_map_sg && data->blocks >= 2048) {
+			/* if transfer size is bigger than 1MiB */
+			host->sg_count = host->ops->dma_map_sg(host,
+					mmc_dev(host->mmc),
+					data->sg, data->sg_len, direction, 2);
+		} else if (host->ops->dma_map_sg && data->blocks >= 128) {
+			/* if transfer size is bigger than 64KiB */
+			host->sg_count = host->ops->dma_map_sg(host,
+					mmc_dev(host->mmc),
+					data->sg, data->sg_len, direction, 1);
+		} else {
+			host->sg_count = dma_map_sg(mmc_dev(host->mmc),
+					data->sg, data->sg_len, direction);
+		}
+
+		if (host->sg_count == 0)
+			goto fail;
+	} else
+		host->sg_count = data->host_cookie;
+
+	desc_vir = host->idma_desc;
+
+	/* to know phy address */
+	host->idma_addr = dma_map_single(mmc_dev(host->mmc),
+				host->idma_desc,
+				/* cache flush for only transfer size */
+				(host->sg_count+1) * 16,
+				DMA_TO_DEVICE);
+	if (dma_mapping_error(mmc_dev(host->mmc), host->idma_addr))
+		goto unmap_entries;
+	BUG_ON(host->idma_addr & 0x3);
+
+	desc_phy = (u8 *)host->idma_addr;
+
+	for_each_sg(data->sg, sg, host->sg_count, i) {
+		addr = sg_dma_address(sg);
+		len = sg_dma_len(sg);
+
+		/* tran, valid */
+		des_flag = (MSHCI_IDMAC_OWN|MSHCI_IDMAC_CH);
+		des_flag |= (i == 0) ? MSHCI_IDMAC_FS : 0;
+
+		mshci_set_mdma_desc(desc_vir, desc_phy, des_flag, len, addr);
+		desc_vir += size_idmac;
+		desc_phy += size_idmac;
+
+		/*
+		 * If this triggers then we have a calculation bug
+		 * somewhere. :/
+		 */
+		WARN_ON((desc_vir - host->idma_desc) > MSHCI_MAX_DMA_LIST * \
+			size_idmac);
+	}
+
+	/*
+	 * Add a terminating flag.
+	 */
+	((struct mshci_idmac *)(desc_vir-size_idmac))->des0 |= MSHCI_IDMAC_LD;
+
+	/* it has to dma map again to resync vir data to phy data */
+	host->idma_addr = dma_map_single(mmc_dev(host->mmc),
+				host->idma_desc,
+				/* cache flush for only transfer size */
+				(host->sg_count+1) * 16,
+				DMA_TO_DEVICE);
+	if (dma_mapping_error(mmc_dev(host->mmc), host->idma_addr))
+		goto unmap_entries;
+	BUG_ON(host->idma_addr & 0x3);
+
+	return 0;
+
+unmap_entries:
+	if (host->ops->dma_unmap_sg && data->blocks >= 2048) {
+		/* if transfer size is bigger than 1MiB */
+		host->ops->dma_unmap_sg(host, mmc_dev(host->mmc),
+			data->sg, data->sg_len, direction, 2);
+	} else if (host->ops->dma_unmap_sg && data->blocks >= 128) {
+		/* if transfer size is bigger than 64KiB */
+		host->ops->dma_unmap_sg(host, mmc_dev(host->mmc),
+			data->sg, data->sg_len, direction, 1);
+	} else {
+		dma_unmap_sg(mmc_dev(host->mmc),
+			data->sg, data->sg_len, direction);
+	}
+fail:
+	return -EINVAL;
+}
+
+/*
+ * mshci_idma_table_post - undo the mappings made by mshci_mdma_table_pre
+ *
+ * Unmaps the descriptor table, then unmaps the data sg list unless the
+ * request is using the async post_req path (host_cookie set), in which
+ * case the core's post_req handler owns the unmap.
+ */
+static void mshci_idma_table_post(struct mshci_host *host,
+	struct mmc_data *data)
+{
+	int direction;
+
+	if (data->flags & MMC_DATA_READ)
+		direction = DMA_FROM_DEVICE;
+	else
+		direction = DMA_TO_DEVICE;
+
+	dma_unmap_single(mmc_dev(host->mmc), host->idma_addr,
+			/* cache flush for only transfer size */
+			(host->sg_count+1) * 16,
+			DMA_TO_DEVICE);
+
+	if (!host->mmc->ops->post_req || !data->host_cookie) {
+		if (host->ops->dma_unmap_sg && data->blocks >= 2048) {
+			/* if transfer size is bigger than 1MiB */
+			host->ops->dma_unmap_sg(host, mmc_dev(host->mmc),
+				data->sg, data->sg_len, direction, 2);
+		} else if (host->ops->dma_unmap_sg && data->blocks >= 128) {
+			/* if transfer size is bigger than 64KiB */
+			host->ops->dma_unmap_sg(host, mmc_dev(host->mmc),
+				data->sg, data->sg_len, direction, 1);
+		} else {
+			dma_unmap_sg(mmc_dev(host->mmc),
+				data->sg, data->sg_len, direction);
+		}
+	}
+}
+
+/* Data timeout value for MSHCI_TMOUT; currently pinned to the maximum
+ * rather than derived from data->timeout_ns/timeout_clks. */
+static u32 mshci_calc_timeout(struct mshci_host *host, struct mmc_data *data)
+{
+	return 0xffffffff; /* this value SHOULD be optimized */
+}
+
+/* For DMA transfers mask the DMA-completion interrupts (the IDMAC path
+ * signals through MSHCI_IDINTEN instead); for PIO unmask TXDR/RXDR. */
+static void mshci_set_transfer_irqs(struct mshci_host *host)
+{
+	if (host->flags & MSHCI_REQ_USE_DMA)
+		mshci_clear_set_irqs(host, INTMSK_DMA, 0);
+	else
+		mshci_clear_set_irqs(host, 0, INTMSK_TXDR | INTMSK_RXDR);
+}
+
+/*
+ * mshci_prepare_data - program the controller for a data transfer
+ *
+ * Decides between IDMAC and PIO (the IDMAC cannot handle sg segments
+ * whose length or offset is not 4-byte aligned), builds the descriptor
+ * table or starts the sg_miter, selects transfer interrupts and writes
+ * block size / byte count.  Register write order matters; do not
+ * reorder.
+ */
+static void mshci_prepare_data(struct mshci_host *host, struct mmc_data *data)
+{
+	u32 count;
+	u32 ret;
+
+	WARN_ON(host->data);
+
+	if (data == NULL)
+		return;
+
+	BUG_ON(data->blksz > host->mmc->max_blk_size);
+	BUG_ON(data->blocks > host->mmc->max_blk_count);
+
+	host->data = data;
+	host->data_early = 0;
+
+	count = mshci_calc_timeout(host, data);
+	mshci_writel(host, count, MSHCI_TMOUT);
+
+	mshci_reset_fifo(host);
+
+	if (host->flags & (MSHCI_USE_IDMA))
+		host->flags |= MSHCI_REQ_USE_DMA;
+
+	/* pre-mapped (async prep) requests were validated already */
+	if (data->host_cookie)
+		goto check_done;
+	/*
+	 * FIXME: This doesn't account for merging when mapping the
+	 * scatterlist.
+	 */
+	if (host->flags & MSHCI_REQ_USE_DMA) {
+		/* mshc's IDMAC can't transfer data that is not aligned
+		 * or has length not divided by 4 byte. */
+		int i;
+		struct scatterlist *sg;
+
+		for_each_sg(data->sg, sg, data->sg_len, i) {
+			if (sg->length & 0x3) {
+				DBG("Reverting to PIO because of "
+					"transfer size (%d)\n",
+					sg->length);
+				host->flags &= ~MSHCI_REQ_USE_DMA;
+				break;
+			} else if (sg->offset & 0x3) {
+				DBG("Reverting to PIO because of "
+					"bad alignment\n");
+				host->flags &= ~MSHCI_REQ_USE_DMA;
+				break;
+			}
+		}
+	}
+check_done:
+
+	if (host->flags & MSHCI_REQ_USE_DMA) {
+		ret = mshci_mdma_table_pre(host, data);
+		if (ret) {
+			/*
+			 * This only happens when someone fed
+			 * us an invalid request.
+			 */
+			WARN_ON(1);
+			host->flags &= ~MSHCI_REQ_USE_DMA;
+		} else {
+			mshci_writel(host, host->idma_addr,
+				MSHCI_DBADDR);
+		}
+	}
+
+	if (host->flags & MSHCI_REQ_USE_DMA) {
+		/* enable DMA, IDMA interrupts and IDMAC */
+		mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) |
+					ENABLE_IDMAC|DMA_ENABLE), MSHCI_CTRL);
+		mshci_writel(host, (mshci_readl(host, MSHCI_BMOD) |
+					(BMOD_IDMAC_ENABLE|BMOD_IDMAC_FB)),
+					MSHCI_BMOD);
+		mshci_writel(host, INTMSK_IDMAC_ERROR, MSHCI_IDINTEN);
+	}
+
+	if (!(host->flags & MSHCI_REQ_USE_DMA)) {
+		int flags;
+
+		flags = SG_MITER_ATOMIC;
+		if (host->data->flags & MMC_DATA_READ)
+			flags |= SG_MITER_TO_SG;
+		else
+			flags |= SG_MITER_FROM_SG;
+
+		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
+		host->blocks = data->blocks;
+
+		/* NOTE(review): KERN_ERR used for what looks like a debug
+		 * message — consider demoting to DBG() */
+		printk(KERN_ERR "it starts transfer on PIO\n");
+	}
+
+	/* set transferred data to 0. this value only uses for PIO write */
+	host->data_transfered = 0;
+	mshci_set_transfer_irqs(host);
+
+	mshci_writel(host, data->blksz, MSHCI_BLKSIZ);
+	mshci_writel(host, (data->blocks * data->blksz), MSHCI_BYTCNT);
+}
+
+/* Compute the data-phase bits of the CMD register for @data:
+ * data-expected, read/write direction and stream mode.  Returns 0 when
+ * the command carries no data. */
+static u32 mshci_set_transfer_mode(struct mshci_host *host,
+	struct mmc_data *data)
+{
+	u32 flags;
+
+	if (!data)
+		return 0;
+
+	WARN_ON(!host->data);
+
+	flags = CMD_DATA_EXP_BIT;
+	if (data->flags & MMC_DATA_WRITE)
+		flags |= CMD_RW_BIT;
+	if (data->flags & MMC_DATA_STREAM)
+		flags |= CMD_TRANSMODE_BIT;
+
+	return flags;
+}
+
+/*
+ * mshci_finish_data - complete the data phase of the current request
+ *
+ * Tears down DMA state, resets the controller on error, accounts
+ * bytes_xfered, then either issues the stop command (open-ended or
+ * errored multi-block transfers) or schedules request completion.
+ */
+static void mshci_finish_data(struct mshci_host *host)
+{
+	struct mmc_data *data;
+
+	BUG_ON(!host->data);
+
+	data = host->data;
+	host->data = NULL;
+
+	if (host->flags & MSHCI_REQ_USE_DMA) {
+		mshci_idma_table_post(host, data);
+		/* disable IDMAC and DMA interrupt */
+		mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) &
+				~(DMA_ENABLE|ENABLE_IDMAC)), MSHCI_CTRL);
+		/* mask all interrupt source of IDMAC */
+		mshci_writel(host, 0x0, MSHCI_IDINTEN);
+	}
+
+	if (data->error) {
+		/* to go to idle state */
+		mshci_reset_ciu(host);
+		/* to clear fifo */
+		mshci_reset_fifo(host);
+		/* to reset dma */
+		mshci_reset_dma(host);
+		data->bytes_xfered = 0;
+	} else
+		data->bytes_xfered = data->blksz * data->blocks;
+
+	/*
+	 * Need to send CMD12 if -
+	 * a) open-ended multiblock transfer (no CMD23)
+	 * b) error in multiblock transfer
+	 */
+	if (data->stop && ((data->error) ||
+		!(host->mmc->caps & MMC_CAP_CMD23) ||
+		((host->mmc->caps & MMC_CAP_CMD23) &&
+		!host->mrq->sbc))) /* packed cmd case */
+		mshci_send_command(host, data->stop);
+	else
+		tasklet_schedule(&host->finish_tasklet);
+}
+
+/*
+ * mshci_wait_release_start_bit - wait for data-busy and CMD start-bit idle
+ *
+ * First waits (sleeping ~100us per iteration, dropping host->lock while
+ * asleep) for the card's data-busy indication to clear, then busy-waits
+ * (up to ~1s) for the hardware to clear CMD_STRT_BIT.  Must be called
+ * with host->lock held (host->sl_flags are the saved irq flags).
+ *
+ * NOTE(review): the "for 11sec" in the first error message does not
+ * obviously match 1,000,000 iterations of a 100us sleep — confirm the
+ * intended bound.
+ */
+static void mshci_wait_release_start_bit(struct mshci_host *host)
+{
+	u32 loop_count = 1000000;
+
+	ktime_t expires;
+	u64 add_time = 100000; /* 100us */
+
+	/* before off clock, make sure data busy is released. */
+	while (mshci_readl(host, MSHCI_STATUS) & (1<<9) && --loop_count) {
+		spin_unlock_irqrestore(&host->lock, host->sl_flags);
+		expires = ktime_add_ns(ktime_get(), add_time);
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
+		spin_lock_irqsave(&host->lock, host->sl_flags);
+	}
+	if (loop_count == 0)
+		printk(KERN_ERR "%s: cmd_strt_bit not released for 11sec\n",
+				mmc_hostname(host->mmc));
+
+	loop_count = 1000000;
+	do {
+		if (!(mshci_readl(host, MSHCI_CMD) & CMD_STRT_BIT))
+			break;
+		loop_count--;
+		udelay(1);
+	} while (loop_count);
+	if (loop_count == 0)
+		printk(KERN_ERR "%s: cmd_strt_bit not released for 1sec\n",
+				mmc_hostname(host->mmc));
+}
+
+/* Gate or ungate the card clock: program CLKENA, then latch the change
+ * with a "clock only" command (no data, no response). */
+static void mshci_clock_onoff(struct mshci_host *host, bool val)
+{
+	mshci_wait_release_start_bit(host);
+
+	mshci_writel(host, val ? (0x1<<0) : (0x0<<0), MSHCI_CLKENA);
+	mshci_writel(host, 0, MSHCI_CMD);
+	mshci_writel(host, CMD_ONLY_CLK, MSHCI_CMD);
+}
+
+/*
+ * mshci_send_command - issue one command (and its data phase) to the card
+ *
+ * Disables controller interrupts while programming CMDARG/CMD, arms a
+ * 10s software timeout, prepares any data transfer, translates the MMC
+ * response flags into MSHCI CMD bits and finally starts the command.
+ * Must be called with host->lock held.
+ */
+static void mshci_send_command(struct mshci_host *host, struct mmc_command *cmd)
+{
+	int flags, ret;
+
+	WARN_ON(host->cmd);
+
+	/* clear error_state (CMD12/stop keeps the previous error) */
+	if (cmd->opcode != 12)
+		host->error_state = 0;
+
+	/* disable interrupt before issuing cmd to the card. */
+	mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) & ~INT_ENABLE),
+			MSHCI_CTRL);
+
+	mod_timer(&host->timer, jiffies + 10 * HZ);
+
+	host->cmd = cmd;
+
+	mshci_prepare_data(host, cmd->data);
+
+	mshci_writel(host, cmd->arg, MSHCI_CMDARG);
+
+	flags = mshci_set_transfer_mode(host, cmd->data);
+
+	/* the controller cannot produce a 136-bit response with busy */
+	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
+		printk(KERN_ERR "%s: Unsupported response type!\n",
+			mmc_hostname(host->mmc));
+		cmd->error = -EINVAL;
+		tasklet_schedule(&host->finish_tasklet);
+		return;
+	}
+
+	if (cmd->flags & MMC_RSP_PRESENT) {
+		flags |= CMD_RESP_EXP_BIT;
+		if (cmd->flags & MMC_RSP_136)
+			flags |= CMD_RESP_LENGTH_BIT;
+	}
+	if (cmd->flags & MMC_RSP_CRC)
+		flags |= CMD_CHECK_CRC_BIT;
+
+	flags |= (cmd->opcode | CMD_STRT_BIT | host->hold_bit |
+			CMD_WAIT_PRV_DAT_BIT);
+
+	/* a still-set start bit means the previous command never left */
+	ret = mshci_readl(host, MSHCI_CMD);
+	if (ret & CMD_STRT_BIT)
+		printk(KERN_ERR "CMD busy. current cmd %d. last cmd reg 0x%x\n",
+			cmd->opcode, ret);
+
+	mshci_writel(host, flags, MSHCI_CMD);
+
+	/* enable interrupt upon it sends a command to the card. */
+	mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) | INT_ENABLE),
+			MSHCI_CTRL);
+}
+
+/*
+ * mshci_finish_command - capture the response of the completed command
+ *
+ * For 136-bit (R2) responses the hardware stores the words in reverse
+ * order, so RESP3..RESP0 map onto resp[0]..resp[3].
+ *
+ * Fix: the previous revision wrapped the four response reads in a
+ * `for (i = 0; i < 4; i++)` loop that re-read and overwrote the same
+ * four registers four times; a single pass is sufficient.
+ */
+static void mshci_finish_command(struct mshci_host *host)
+{
+	BUG_ON(host->cmd == NULL);
+
+	if (host->cmd->flags & MMC_RSP_PRESENT) {
+		if (host->cmd->flags & MMC_RSP_136) {
+			/* response words are stored in reverse order */
+			host->cmd->resp[0] = mshci_readl(host, MSHCI_RESP3);
+			host->cmd->resp[1] = mshci_readl(host, MSHCI_RESP2);
+			host->cmd->resp[2] = mshci_readl(host, MSHCI_RESP1);
+			host->cmd->resp[3] = mshci_readl(host, MSHCI_RESP0);
+		} else {
+			host->cmd->resp[0] = mshci_readl(host, MSHCI_RESP0);
+		}
+	}
+
+	host->cmd->error = 0;
+
+	/* finish a data phase that completed before the command did */
+	if (host->data && host->data_early)
+		mshci_finish_data(host);
+
+	if (!host->cmd->data)
+		tasklet_schedule(&host->finish_tasklet);
+
+	host->cmd = NULL;
+}
+
+/*
+ * mshci_set_clock - set the card clock to (at most) @clock Hz
+ *
+ * Gates the clock, computes the divider (CLKDIV holds N where the card
+ * clock is max_clk / (2*N); 0 means undivided), latches it with a
+ * "clock only" command and ungates the clock again.  @ddr is currently
+ * unused here.
+ */
+static void mshci_set_clock(struct mshci_host *host,
+		unsigned int clock, u32 ddr)
+{
+	int div;
+
+	/* before changing clock, the clock needs to be off. */
+	mshci_clock_onoff(host, CLK_DISABLE);
+
+	if (clock == 0)
+		goto out;
+
+	if (clock >= host->max_clk) {
+		div = 0;
+	} else {
+		for (div = 1; div <= 0xff; div++) {
+			/* div value should not be greater than 0xff */
+			if ((host->max_clk / (div<<1)) <= clock)
+				break;
+		}
+	}
+
+	mshci_wait_release_start_bit(host);
+
+	mshci_writel(host, div, MSHCI_CLKDIV);
+
+	mshci_writel(host, 0, MSHCI_CMD);
+	mshci_writel(host, CMD_ONLY_CLK, MSHCI_CMD);
+	mshci_writel(host, mshci_readl(host, MSHCI_CMD)&(~CMD_SEND_CLK_ONLY),
+					MSHCI_CMD);
+
+	mshci_clock_onoff(host, CLK_ENABLE);
+
+out:
+	host->clock = clock;
+}
+
+/* Drive the card power enable.  @power is an MMC_VDD bit index, or
+ * (unsigned short)-1 for "off"; this controller only has a single
+ * on/off bit, so any non-zero value simply enables power. */
+static void mshci_set_power(struct mshci_host *host, unsigned short power)
+{
+	u8 pwr = (power == (unsigned short)-1) ? 0 : (u8)power;
+
+	if (host->pwr == pwr)
+		return;
+
+	host->pwr = pwr;
+
+	mshci_writel(host, pwr ? 0x1 : 0, MSHCI_PWREN);
+}
+
+#ifdef CONFIG_MMC_POLLING_WAIT_CMD23
+/*
+ * Poll for completion of the SBC (CMD23) command that mshci_send_sbc()
+ * issued with interrupts masked.
+ *
+ * Spins on the raw interrupt status until a command-phase bit appears
+ * (or the poll budget runs out), clears the pending bits, translates
+ * any error bit into host->mrq->sbc->error, and restores the saved
+ * interrupt mask before returning.
+ *
+ * @intmask: the MSHCI_INTMSK value saved before interrupts were masked.
+ *
+ * (Fixes a stray double semicolon in the original declaration line.)
+ */
+static void mshci_check_sbc_status(struct mshci_host *host, int intmask)
+{
+	int timeout, int_status;
+
+	/* wait for command done or error by polling */
+	timeout = 0x100000; /* it is bigger than 1ms */
+	do {
+		int_status = mshci_readl(host, MSHCI_RINTSTS);
+		if (int_status & CMD_STATUS)
+			break;
+		timeout--;
+	} while (timeout);
+
+	/* clear pending interrupt bits */
+	mshci_writel(host, int_status, MSHCI_RINTSTS);
+
+	/* check whether a command error has occurred or not. */
+	if (int_status & INTMSK_HTO) {
+		printk(KERN_ERR "%s: %s Host timeout error\n",
+				mmc_hostname(host->mmc),
+				__func__);
+		host->mrq->sbc->error = -ETIMEDOUT;
+	} else if (int_status & INTMSK_DRTO) {
+		printk(KERN_ERR "%s: %s Data read timeout error\n",
+				mmc_hostname(host->mmc),
+				__func__);
+		host->mrq->sbc->error = -ETIMEDOUT;
+	} else if (int_status & INTMSK_SBE) {
+		printk(KERN_ERR "%s: %s FIFO Start bit error\n",
+				mmc_hostname(host->mmc),
+				__func__);
+		host->mrq->sbc->error = -EIO;
+	} else if (int_status & INTMSK_EBE) {
+		printk(KERN_ERR "%s: %s FIFO Endbit/Write no CRC error\n",
+				mmc_hostname(host->mmc),
+				__func__);
+		host->mrq->sbc->error = -EIO;
+	} else if (int_status & INTMSK_DCRC) {
+		printk(KERN_ERR "%s: %s Data CRC error\n",
+				mmc_hostname(host->mmc),
+				__func__);
+		host->mrq->sbc->error = -EIO;
+	} else if (int_status & INTMSK_FRUN) {
+		printk(KERN_ERR "%s: %s FIFO underrun/overrun error\n",
+				mmc_hostname(host->mmc),
+				__func__);
+		host->mrq->sbc->error = -EIO;
+	} else if (int_status & CMD_ERROR) {
+		printk(KERN_ERR "%s: %s cmd %s error\n",
+				mmc_hostname(host->mmc),
+				__func__, (intmask & INTMSK_RCRC) ?
+				"response crc" :
+				(intmask & INTMSK_RE) ? "response" :
+				"response timeout");
+		host->mrq->sbc->error = -ETIMEDOUT;
+	}
+
+	if (host->mrq->sbc->error) {
+		/* restore interrupt mask bits */
+		mshci_writel(host, intmask, MSHCI_INTMSK);
+		return;
+	}
+
+	/* poll budget exhausted with no command status at all */
+	if (!timeout) {
+		printk(KERN_ERR "%s: %s no interrupt occured\n",
+			mmc_hostname(host->mmc), __func__);
+		host->mrq->sbc->error = -ETIMEDOUT;
+		/* restore interrupt mask bits */
+		mshci_writel(host, intmask, MSHCI_INTMSK);
+		return;
+	}
+
+	/* command done interrupt occurred with no errors; nothing to do,
+	 * just restore the mask and return to the caller */
+	if ((int_status & INTMSK_CDONE) && !(int_status & CMD_ERROR)) {
+		/* restore interrupt mask bits */
+		mshci_writel(host, intmask, MSHCI_INTMSK);
+		return;
+	}
+
+	/* should not be here */
+	printk(KERN_ERR "%s: an error that has not to be occured was"
+		" occured 0x%x\n", mmc_hostname(host->mmc), int_status);
+}
+
+/*
+ * Issue the SBC (CMD23, set block count) command synchronously.
+ *
+ * Interrupts are masked and completion is detected by polling in
+ * mshci_check_sbc_status(), on the expectation that the response
+ * arrives faster than an interrupt round-trip.  An unsupported
+ * response type (136-bit + busy) fails the request immediately via
+ * the finish tasklet.
+ */
+static void mshci_send_sbc(struct mshci_host *host, struct mmc_command *cmd)
+{
+	int flags = 0, ret, intmask;
+
+	WARN_ON(host->cmd);
+
+	/* disable interrupt before issuing cmd to the card. */
+	mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) & ~INT_ENABLE),
+		MSHCI_CTRL);
+
+	host->cmd = cmd;
+
+	/* watchdog in case completion never shows up */
+	mod_timer(&host->timer, jiffies + 10 * HZ);
+
+	mshci_writel(host, cmd->arg, MSHCI_CMDARG);
+
+	/* long response combined with busy is not supported */
+	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
+		printk(KERN_ERR "%s: Unsupported response type!\n",
+			mmc_hostname(host->mmc));
+		cmd->error = -EINVAL;
+		tasklet_schedule(&host->finish_tasklet);
+		return;
+	}
+
+	/* build the CMD register flags from the response type */
+	if (cmd->flags & MMC_RSP_PRESENT) {
+		flags |= CMD_RESP_EXP_BIT;
+		if (cmd->flags & MMC_RSP_136)
+			flags |= CMD_RESP_LENGTH_BIT;
+	}
+	if (cmd->flags & MMC_RSP_CRC)
+		flags |= CMD_CHECK_CRC_BIT;
+
+	flags |= (cmd->opcode | CMD_STRT_BIT | host->hold_bit |
+		CMD_WAIT_PRV_DAT_BIT);
+
+	/* warn if the previous command is somehow still in flight */
+	ret = mshci_readl(host, MSHCI_CMD);
+	if (ret & CMD_STRT_BIT)
+		printk(KERN_ERR "CMD busy. current cmd %d. last cmd reg 0x%x\n",
+			cmd->opcode, ret);
+
+	/* backup interrupt mask bits */
+	intmask = mshci_readl(host, MSHCI_INTMSK);
+
+	/* disable interrupts for the sbc command: it will wait for command
+	 * done by polling, expecting a faster response */
+	mshci_clear_set_irqs(host, INTMSK_ALL, 0);
+
+	/* send command */
+	mshci_writel(host, flags, MSHCI_CMD);
+
+	/* enable interrupts once the command has been sent to the card. */
+	mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) | INT_ENABLE),
+		MSHCI_CTRL);
+
+	/* check the interrupt by polling */
+	mshci_check_sbc_status(host, intmask);
+}
+#endif
+
+/*****************************************************************************\
+ * *
+ * MMC callbacks *
+ * *
+\*****************************************************************************/
+
+/*
+ * Dispatch an mmc_request from the MMC core.
+ *
+ * Before issuing anything except CMD12 (stop), busy-wait until the
+ * controller releases the DATA0 line (MSHCI_STATUS bit 9 -- presumably
+ * the data-busy flag; confirm against the MSHC databook).  The wait
+ * normally naps in 50us hrtimer slices, but falls back to udelay()
+ * when a previous error means we may be running in tasklet context
+ * where sleeping is forbidden.  If the card never goes idle within
+ * ~1s the request is failed with -ENOTRECOVERABLE.
+ */
+static void mshci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+{
+	struct mshci_host *host;
+	bool present;
+	int timeout;
+	ktime_t expires;
+	u64 add_time = 50000; /* 50us */
+
+	host = mmc_priv(mmc);
+
+	WARN_ON(host->mrq != NULL);
+
+	host->mrq = mrq;
+
+	/* Wait max 1 sec */
+	timeout = 100000;
+
+	/* We shouldn't wait for data inhibit for stop commands, even
+	   though they might use busy signaling */
+	if (mrq->cmd->opcode == 12) {
+		/* nothing to do */
+	} else {
+		for (;;) {
+			spin_lock_irqsave(&host->lock, host->sl_flags);
+			if (mshci_readl(host, MSHCI_STATUS) & (1<<9)) {
+				if (timeout == 0) {
+					printk(KERN_ERR "%s: Controller never"
+						" released data0.\n",
+						mmc_hostname(host->mmc));
+					mshci_dumpregs(host);
+
+					mrq->cmd->error = -ENOTRECOVERABLE;
+					host->error_state = 1;
+
+					tasklet_schedule(&host->finish_tasklet);
+					spin_unlock_irqrestore(&host->lock,
+						host->sl_flags);
+					return;
+				}
+				timeout--;
+
+				/* if previous command made an error,
+				 * this function might be called by tasklet.
+				 * So, it SHOULD NOT use schedule_hrtimeout */
+				if (host->error_state == 1) {
+					spin_unlock_irqrestore(&host->lock,
+						host->sl_flags);
+					udelay(10);
+				} else {
+					spin_unlock_irqrestore(&host->lock,
+						host->sl_flags);
+					expires = ktime_add_ns(ktime_get(),
+						add_time);
+					set_current_state(
+						TASK_UNINTERRUPTIBLE);
+					schedule_hrtimeout(&expires,
+						HRTIMER_MODE_ABS);
+				}
+			} else {
+				/* DATA0 released; proceed */
+				spin_unlock_irqrestore(&host->lock,
+					host->sl_flags);
+				break;
+			}
+		}
+	}
+	spin_lock_irqsave(&host->lock, host->sl_flags);
+	/* If polling, assume that the card is always present. */
+	if (host->quirks & MSHCI_QUIRK_BROKEN_CARD_DETECTION ||
+		host->quirks & MSHCI_QUIRK_BROKEN_PRESENT_BIT)
+		present = true;
+	else
+		present = !(mshci_readl(host, MSHCI_CDETECT) & CARD_PRESENT);
+
+	if (!present || host->flags & MSHCI_DEVICE_DEAD) {
+		host->mrq->cmd->error = -ENOMEDIUM;
+		tasklet_schedule(&host->finish_tasklet);
+	} else {
+#ifdef CONFIG_MMC_POLLING_WAIT_CMD23
+		/* issue CMD23 synchronously first, then the data command */
+		if (mrq->sbc) {
+			mshci_send_sbc(host, mrq->sbc);
+			if (mrq->sbc->error) {
+				tasklet_schedule(&host->finish_tasklet);
+			} else {
+				if (host->cmd)
+					host->cmd = NULL;
+				mshci_send_command(host, mrq->cmd);
+			}
+		} else
+#endif
+			mshci_send_command(host, mrq->cmd);
+	}
+
+	mmiowb();
+	spin_unlock_irqrestore(&host->lock, host->sl_flags);
+}
+
+/*
+ * Apply the MMC core's requested I/O settings: clock, power, bus width
+ * and timing mode.
+ *
+ * Bus width is programmed into MSHCI_CTYPE (bit 16 = 8-bit, bit 0 =
+ * 4-bit, 0 = 1-bit).  For DDR50 timing, bit 16 of MSHCI_UHS_REG is set
+ * and MSHCI_CLKSEL is tuned per SoC revision.
+ *
+ * NOTE(review): the MSHCI_CLKSEL magic values (0x00010001/0x00020002)
+ * appear to select clock drive/sample phases for Exynos4412 -- confirm
+ * against the SoC manual before changing them.
+ */
+static void mshci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+	struct mshci_host *host;
+	u32 regs;
+
+	host = mmc_priv(mmc);
+
+	spin_lock_irqsave(&host->lock, host->sl_flags);
+
+	if (host->flags & MSHCI_DEVICE_DEAD)
+		goto out;
+
+	if (ios->power_mode == MMC_POWER_OFF)
+		mshci_reinit(host);
+
+#ifdef CONFIG_MMC_CLKGATE
+	/* gating the clock and out */
+	if (mmc->clk_gated) {
+		WARN_ON(ios->clock != 0);
+		if (host->clock != 0)
+			mshci_set_clock(host, ios->clock, ios->ddr);
+		goto out;
+	}
+#endif
+
+	/* give the platform glue first crack at the settings */
+	if (host->ops->set_ios)
+		host->ops->set_ios(host, ios);
+
+	mshci_set_clock(host, ios->clock, ios->ddr);
+
+	if (ios->power_mode == MMC_POWER_OFF)
+		mshci_set_power(host, -1);
+	else
+		mshci_set_power(host, ios->vdd);
+
+	regs = mshci_readl(host, MSHCI_UHS_REG);
+
+	if (ios->bus_width == MMC_BUS_WIDTH_8) {
+		mshci_writel(host, (0x1<<16), MSHCI_CTYPE);
+		if (ios->timing == MMC_TIMING_UHS_DDR50) {
+			regs |= (0x1 << 16);	/* DDR mode */
+			mshci_writel(host, regs, MSHCI_UHS_REG);
+			/* if exynos4412 EVT1 or the latest one */
+			if (soc_is_exynos4412() &&
+				samsung_rev() >= EXYNOS4412_REV_1_0) {
+				if ((host->max_clk/2) < 46300000) {
+					mshci_writel(host, (0x00010001),
+						MSHCI_CLKSEL);
+				} else {
+					mshci_writel(host, (0x00020002),
+						MSHCI_CLKSEL);
+				}
+			} else {
+				if ((host->max_clk/2) < 40000000)
+					mshci_writel(host, (0x00010001),
+						MSHCI_CLKSEL);
+				else
+					mshci_writel(host, (0x00020002),
+						MSHCI_CLKSEL);
+			}
+		} else {
+			regs &= ~(0x1 << 16);	/* SDR mode */
+			mshci_writel(host, regs|(0x0<<0), MSHCI_UHS_REG);
+			mshci_writel(host, (0x00010001), MSHCI_CLKSEL);
+		}
+	} else if (ios->bus_width == MMC_BUS_WIDTH_4) {
+		mshci_writel(host, (0x1<<0), MSHCI_CTYPE);
+		if (ios->timing == MMC_TIMING_UHS_DDR50) {
+			regs |= (0x1 << 16);
+			mshci_writel(host, regs, MSHCI_UHS_REG);
+			mshci_writel(host, (0x00010001), MSHCI_CLKSEL);
+		} else {
+			regs &= ~(0x1 << 16);
+			mshci_writel(host, regs|(0x0<<0), MSHCI_UHS_REG);
+			mshci_writel(host, (0x00010001), MSHCI_CLKSEL);
+		}
+	} else {
+		/* default: 1-bit bus */
+		regs &= ~(0x1 << 16);
+		mshci_writel(host, regs|0, MSHCI_UHS_REG);
+		mshci_writel(host, (0x0<<0), MSHCI_CTYPE);
+		mshci_writel(host, (0x00010001), MSHCI_CLKSEL);
+	}
+out:
+	mmiowb();
+	spin_unlock_irqrestore(&host->lock, host->sl_flags);
+}
+
+/*
+ * Report write-protect status to the MMC core.
+ * Returns non-zero when the card is read-only.
+ */
+static int mshci_get_ro(struct mmc_host *mmc)
+{
+	struct mshci_host *host = mmc_priv(mmc);
+	int status;
+
+	spin_lock_irqsave(&host->lock, host->sl_flags);
+
+	if (host->quirks & MSHCI_QUIRK_NO_WP_BIT) {
+		/* controller WP bit unusable; ask the platform hook */
+		status = host->ops->get_ro(mmc) ? 0 : WRTPRT_ON;
+	} else if (host->flags & MSHCI_DEVICE_DEAD) {
+		status = 0;
+	} else {
+		status = mshci_readl(host, MSHCI_WRTPRT);
+	}
+
+	spin_unlock_irqrestore(&host->lock, host->sl_flags);
+
+	return status & WRTPRT_ON;
+}
+
+/* Enable or disable forwarding of the card's SDIO interrupt. */
+static void mshci_enable_sdio_irq(struct mmc_host *mmc, int enable)
+{
+	struct mshci_host *host = mmc_priv(mmc);
+
+	spin_lock_irqsave(&host->lock, host->sl_flags);
+
+	if (!(host->flags & MSHCI_DEVICE_DEAD)) {
+		if (enable)
+			mshci_unmask_irqs(host, SDIO_INT_ENABLE);
+		else
+			mshci_mask_irqs(host, SDIO_INT_ENABLE);
+	}
+
+	mmiowb();
+
+	spin_unlock_irqrestore(&host->lock, host->sl_flags);
+}
+
+/* Invoke the platform's per-card init hook, if one is registered. */
+static void mshci_init_card(struct mmc_host *mmc, struct mmc_card *card)
+{
+	struct mshci_host *host = mmc_priv(mmc);
+
+	spin_lock_irqsave(&host->lock, host->sl_flags);
+
+	if (!(host->flags & MSHCI_DEVICE_DEAD) && host->ops->init_card)
+		host->ops->init_card(host);
+
+	mmiowb();
+
+	spin_unlock_irqrestore(&host->lock, host->sl_flags);
+}
+
+/*
+ * Asynchronous-request hook: pre-map the data scatterlist for DMA
+ * before the request is actually issued, so mapping overlaps with the
+ * previous transfer.  The resulting mapped-entry count is stashed in
+ * data->host_cookie (0 means "not pre-mapped").
+ */
+static void mshci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq,
+		bool is_first_req)
+{
+	struct mshci_host *host;
+	struct mmc_data *data = mrq->data;
+	int sg_count, direction;
+
+	host = mmc_priv(mmc);
+	spin_lock_irqsave(&host->lock, host->sl_flags);
+
+	if (!data)
+		goto out;
+
+	/* already carries a stale cookie; reset and skip pre-mapping */
+	if (data->host_cookie) {
+		data->host_cookie = 0;
+		goto out;
+	}
+
+	if (host->flags & MSHCI_USE_IDMA) {
+		/* mshc's IDMAC can't transfer data that is not aligned
+		 * or has length not divided by 4 byte. */
+		int i;
+		struct scatterlist *sg;
+
+		for_each_sg(data->sg, sg, data->sg_len, i) {
+			if (sg->length & 0x3) {
+				DBG("Reverting to PIO because of "
+					"transfer size (%d)\n",
+					sg->length);
+				/* NOTE(review): unlike the misaligned-
+				 * offset case below, this path does not
+				 * clear MSHCI_REQ_USE_DMA -- confirm
+				 * whether that is intentional */
+				data->host_cookie = 0;
+				goto out;
+			} else if (sg->offset & 0x3) {
+				DBG("Reverting to PIO because of "
+					"bad alignment\n");
+				host->flags &= ~MSHCI_REQ_USE_DMA;
+				data->host_cookie = 0;
+				goto out;
+			}
+		}
+	}
+
+	if (data->flags & MMC_DATA_READ)
+		direction = DMA_FROM_DEVICE;
+	else
+		direction = DMA_TO_DEVICE;
+
+	if (host->ops->dma_map_sg && data->blocks >= 2048) {
+		/* if transfer size is bigger than 1MiB */
+		sg_count = host->ops->dma_map_sg(host,
+			mmc_dev(host->mmc),
+			data->sg, data->sg_len, direction, 2);
+	} else if (host->ops->dma_map_sg && data->blocks >= 128) {
+		/* if transfer size is bigger than 64KiB */
+		sg_count = host->ops->dma_map_sg(host,
+			mmc_dev(host->mmc),
+			data->sg, data->sg_len, direction, 1);
+	} else {
+		sg_count = dma_map_sg(mmc_dev(host->mmc),
+			data->sg, data->sg_len, direction);
+	}
+
+	/* cookie records the mapped entry count; 0 = mapping failed */
+	if (sg_count == 0)
+		data->host_cookie = 0;
+	else
+		data->host_cookie = sg_count;
+out:
+	spin_unlock_irqrestore(&host->lock, host->sl_flags);
+	return;
+}
+
+/*
+ * Asynchronous-request hook: undo the DMA mapping created by
+ * mshci_pre_req() once the request has retired.  A zero host_cookie
+ * means nothing was mapped, so there is nothing to undo.
+ */
+static void mshci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
+		int err)
+{
+	struct mshci_host *host = mmc_priv(mmc);
+	struct mmc_data *data = mrq->data;
+	int dir;
+
+	spin_lock_irqsave(&host->lock, host->sl_flags);
+
+	if (!data || !data->host_cookie)
+		goto out;
+
+	dir = (data->flags & MMC_DATA_READ) ?
+		DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+	if (host->ops->dma_unmap_sg && data->blocks >= 2048) {
+		/* transfer size of 1MiB or more */
+		host->ops->dma_unmap_sg(host, mmc_dev(host->mmc),
+			data->sg, data->sg_len, dir, 2);
+	} else if (host->ops->dma_unmap_sg && data->blocks >= 128) {
+		/* transfer size of 64KiB or more */
+		host->ops->dma_unmap_sg(host, mmc_dev(host->mmc),
+			data->sg, data->sg_len, dir, 1);
+	} else {
+		dma_unmap_sg(mmc_dev(host->mmc),
+			data->sg, data->sg_len, dir);
+	}
+out:
+	spin_unlock_irqrestore(&host->lock, host->sl_flags);
+}
+
+/* Host operations table handed to the MMC core.  Note that get_ro may
+ * be overridden with the platform callback in mshci_add_host(). */
+static struct mmc_host_ops mshci_ops = {
+	.request	= mshci_request,
+	.set_ios	= mshci_set_ios,
+	.get_ro		= mshci_get_ro,
+	.enable_sdio_irq = mshci_enable_sdio_irq,
+	.init_card	= mshci_init_card,
+#ifdef CONFIG_MMC_MSHCI_ASYNC_OPS
+	.pre_req	= mshci_pre_req,
+	.post_req	= mshci_post_req,
+#endif
+};
+
+/*****************************************************************************\
+ * *
+ * Tasklets *
+ * *
+\*****************************************************************************/
+
+/*
+ * Card-detect tasklet, scheduled from the interrupt handler.  If a
+ * request was in flight when the detect state changed, fail it with
+ * -ENOMEDIUM, then ask the MMC core to rescan the slot after a 200ms
+ * debounce.
+ *
+ * NOTE(review): the condition also triggers when CDETECT has
+ * CARD_PRESENT set; whether that bit means "present" or "removed" on
+ * this controller is not visible here -- confirm against the databook.
+ */
+static void mshci_tasklet_card(unsigned long param)
+{
+	struct mshci_host *host;
+
+	host = (struct mshci_host *)param;
+
+	spin_lock_irqsave(&host->lock, host->sl_flags);
+
+	if ((host->quirks & MSHCI_QUIRK_BROKEN_CARD_DETECTION) ||
+		(host->quirks & MSHCI_QUIRK_BROKEN_PRESENT_BIT) ||
+		(mshci_readl(host, MSHCI_CDETECT) & CARD_PRESENT)) {
+		if (host->mrq) {
+			printk(KERN_ERR "%s: Card removed during transfer!\n",
+				mmc_hostname(host->mmc));
+			printk(KERN_ERR "%s: Resetting controller.\n",
+				mmc_hostname(host->mmc));
+
+			host->mrq->cmd->error = -ENOMEDIUM;
+			tasklet_schedule(&host->finish_tasklet);
+		}
+	}
+
+	spin_unlock_irqrestore(&host->lock, host->sl_flags);
+
+	mmc_detect_change(host->mmc, msecs_to_jiffies(200));
+}
+
+/*
+ * Request-finish tasklet: cancels the watchdog timer, resets the FIFO
+ * state machine if the request ended in error, clears the in-flight
+ * request/command/data pointers, and finally reports the request back
+ * to the MMC core outside the host lock.
+ */
+static void mshci_tasklet_finish(unsigned long param)
+{
+	struct mshci_host *host;
+	struct mmc_request *mrq;
+
+	host = (struct mshci_host *)param;
+
+	if (host == NULL)
+		return;
+
+	spin_lock_irqsave(&host->lock, host->sl_flags);
+
+	del_timer(&host->timer);
+
+	mrq = host->mrq;
+
+	if (mrq == NULL || mrq->cmd == NULL)
+		goto out;
+
+	/*
+	 * The controller needs a reset of internal state machines
+	 * upon error conditions.
+	 */
+	if (!(host->flags & MSHCI_DEVICE_DEAD) &&
+		(mrq->cmd->error ||
+#ifdef CONFIG_MMC_POLLING_WAIT_CMD23
+		(mrq->sbc && mrq->sbc->error) ||
+#endif
+		(mrq->data && (mrq->data->error ||
+		(mrq->data->stop && mrq->data->stop->error))))) {
+		mshci_reset_fifo(host);
+	}
+
+out:
+	host->mrq = NULL;
+	host->cmd = NULL;
+	host->data = NULL;
+
+	mmiowb();
+	spin_unlock_irqrestore(&host->lock, host->sl_flags);
+
+	/* notify the core outside the lock; it may issue a new request */
+	if (mrq)
+		mmc_request_done(host->mmc, mrq);
+}
+
+/*
+ * Software watchdog: fires when the hardware never signalled request
+ * completion.  Marks the in-flight data/command with -ETIMEDOUT and
+ * pushes the request through the normal finish path.
+ */
+static void mshci_timeout_timer(unsigned long data)
+{
+	struct mshci_host *host;
+
+	host = (struct mshci_host *)data;
+
+	spin_lock_irqsave(&host->lock, host->sl_flags);
+
+	if (host->mrq) {
+		printk(KERN_ERR "%s: Timeout waiting for hardware "
+			"interrupt.\n", mmc_hostname(host->mmc));
+		mshci_dumpregs(host);
+
+		if (host->data) {
+			/* data phase stuck: finish it with a timeout */
+			host->data->error = -ETIMEDOUT;
+			mshci_finish_data(host);
+		} else {
+			/* command phase stuck (or nothing issued yet) */
+			if (host->cmd)
+				host->cmd->error = -ETIMEDOUT;
+			else
+				host->mrq->cmd->error = -ETIMEDOUT;
+
+			tasklet_schedule(&host->finish_tasklet);
+		}
+	}
+
+	mmiowb();
+	spin_unlock_irqrestore(&host->lock, host->sl_flags);
+}
+
+/*****************************************************************************\
+ * *
+ * Interrupt handling *
+ * *
+\*****************************************************************************/
+
+/*
+ * Handle command-phase interrupt status bits: translate response
+ * timeout (RTO) and response CRC/general errors (RCRC/RE) into
+ * cmd->error, failing the request via the finish tasklet, or complete
+ * the command normally on CDONE.
+ */
+static void mshci_cmd_irq(struct mshci_host *host, u32 intmask)
+{
+	BUG_ON(intmask == 0);
+
+	/* spurious interrupt: no command in flight */
+	if (!host->cmd) {
+		printk(KERN_ERR "%s: Got command interrupt 0x%08x even "
+			"though no command operation was in progress.\n",
+			mmc_hostname(host->mmc), (unsigned)intmask);
+		mshci_dumpregs(host);
+		return;
+	}
+
+	if (intmask & INTMSK_RTO) {
+		host->cmd->error = -ETIMEDOUT;
+		printk(KERN_ERR "%s: cmd %d response timeout error\n",
+			mmc_hostname(host->mmc), host->cmd->opcode);
+	} else if (intmask & (INTMSK_RCRC | INTMSK_RE)) {
+		host->cmd->error = -EILSEQ;
+		printk(KERN_ERR "%s: cmd %d repsonse %s error\n",
+			mmc_hostname(host->mmc), host->cmd->opcode,
+			(intmask & INTMSK_RCRC) ? "crc" : "RE");
+	}
+	if (host->cmd->error) {
+		/* remember that an error happened (affects later waits) */
+		host->error_state = 1;
+#if defined(CONFIG_MACH_M0) || defined(CONFIG_MACH_P4NOTE) || \
+	defined(CONFIG_MACH_C1_USA_ATT)
+		/* dh0421.hwang */
+		if (host->mmc && host->mmc->card)
+			mshci_dumpregs(host);
+#endif
+		tasklet_schedule(&host->finish_tasklet);
+		return;
+	}
+
+	if (intmask & INTMSK_CDONE)
+		mshci_finish_command(host);
+}
+
+/*
+ * Handle data-phase interrupt status, from either the main interrupt
+ * status (intr_src == INT_SRC_MINT) or the IDMAC status register
+ * (INT_SRC_IDMAC).  Error bits set host->data->error; otherwise PIO
+ * transfers are pumped and DTO (data transfer over) finishes the data
+ * phase -- deferred via data_early if the command is still running.
+ */
+static void mshci_data_irq(struct mshci_host *host, u32 intmask, u8 intr_src)
+{
+	BUG_ON(intmask == 0);
+
+	if (!host->data) {
+		/*
+		 * The "data complete" interrupt is also used to
+		 * indicate that a busy state has ended. See comment
+		 * above in mshci_cmd_irq().
+		 */
+		if (host->cmd && (host->cmd->flags & MMC_RSP_BUSY)) {
+			if (intmask & INTMSK_DTO) {
+				mshci_finish_command(host);
+				return;
+			}
+		}
+
+		printk(KERN_ERR "%s: Got data interrupt 0x%08x from %s "
+			"even though no data operation was in progress.\n",
+			mmc_hostname(host->mmc), (unsigned)intmask,
+			intr_src ? "MINT" : "IDMAC");
+		mshci_dumpregs(host);
+
+		return;
+	}
+	if (intr_src == INT_SRC_MINT) {
+		if (intmask & INTMSK_HTO) {
+			printk(KERN_ERR "%s: Host timeout error\n",
+					mmc_hostname(host->mmc));
+			host->data->error = -ETIMEDOUT;
+/* NOTE(review): debugging aid left enabled -- panics the whole system
+ * on a host-timeout error.  Should be disabled for production. */
+#if 1 /* debugging for Host timeout error */
+			mshci_dumpregs(host);
+			panic("[TEST] %s: HTO error interrupt occured\n",
+					mmc_hostname(host->mmc));
+#endif
+		} else if (intmask & INTMSK_DRTO) {
+			printk(KERN_ERR "%s: Data read timeout error\n",
+					mmc_hostname(host->mmc));
+			host->data->error = -ETIMEDOUT;
+		} else if (intmask & INTMSK_SBE) {
+			printk(KERN_ERR "%s: FIFO Start bit error\n",
+					mmc_hostname(host->mmc));
+			host->data->error = -EIO;
+		} else if (intmask & INTMSK_EBE) {
+			printk(KERN_ERR "%s: FIFO Endbit/Write no CRC error\n",
+					mmc_hostname(host->mmc));
+			host->data->error = -EIO;
+		} else if (intmask & INTMSK_DCRC) {
+			printk(KERN_ERR "%s: Data CRC error\n",
+					mmc_hostname(host->mmc));
+			host->data->error = -EIO;
+		} else if (intmask & INTMSK_FRUN) {
+			printk(KERN_ERR "%s: FIFO underrun/overrun error\n",
+					mmc_hostname(host->mmc));
+			host->data->error = -EIO;
+		}
+	} else {
+		if (intmask & IDSTS_FBE) {
+			printk(KERN_ERR "%s: Fatal Bus error on DMA\n",
+					mmc_hostname(host->mmc));
+			host->data->error = -EIO;
+		} else if (intmask & IDSTS_CES) {
+			printk(KERN_ERR "%s: Card error on DMA\n",
+					mmc_hostname(host->mmc));
+			host->data->error = -EIO;
+		} else if (intmask & IDSTS_DU) {
+			printk(KERN_ERR "%s: Description error on DMA\n",
+					mmc_hostname(host->mmc));
+			host->data->error = -EIO;
+		}
+	}
+
+	if (host->data->error) {
+		/* remember that an error happened (affects later waits) */
+		host->error_state = 1;
+#if defined(CONFIG_MACH_M0) || defined(CONFIG_MACH_P4NOTE) || \
+	defined(CONFIG_MACH_C1_USA_ATT)
+		/* dh0421.hwang */
+		if (host->mmc && host->mmc->card)
+			mshci_dumpregs(host);
+#endif
+		mshci_finish_data(host);
+	} else {
+		/* no error: pump PIO data if this transfer isn't DMA */
+		if (!(host->flags & MSHCI_REQ_USE_DMA) &&
+			(((host->data->flags & MMC_DATA_READ) &&
+			(intmask & (INTMSK_RXDR | INTMSK_DTO))) ||
+			((host->data->flags & MMC_DATA_WRITE) &&
+			(intmask & (INTMSK_TXDR)))))
+			mshci_transfer_pio(host);
+
+		if (intmask & INTMSK_DTO) {
+			if (host->cmd) {
+				/*
+				 * Data managed to finish before the
+				 * command completed. Make sure we do
+				 * things in the proper order.
+				 */
+				host->data_early = 1;
+			} else {
+				mshci_finish_data(host);
+			}
+		}
+	}
+}
+
+/*
+ * Top-level interrupt handler.
+ *
+ * Reads the masked interrupt status; if it is empty (or reads as all
+ * ones), falls back to the IDMAC status register.  Dispatches
+ * card-detect, command-phase and data-phase bits to their handlers.
+ * For RTO/DRTO errors the matching CDONE/DTO bit is known to arrive
+ * slightly later than the error bit, so it is polled for before the
+ * handler runs.  SDIO card interrupts are signalled to the core after
+ * the host lock is dropped.
+ */
+static irqreturn_t mshci_irq(int irq, void *dev_id)
+{
+	irqreturn_t result;
+	struct mshci_host *host = dev_id;
+	u32 intmask;
+	int cardint = 0;
+	int timeout = 0x10000;
+
+	spin_lock(&host->lock);
+
+	intmask = mshci_readl(host, MSHCI_MINTSTS);
+
+	if (!intmask || intmask == 0xffffffff) {
+		/* check if there is an interrupt for the IDMAC */
+		intmask = mshci_readl(host, MSHCI_IDSTS);
+		if (intmask) {
+			mshci_writel(host, intmask, MSHCI_IDSTS);
+			mshci_data_irq(host, intmask, INT_SRC_IDMAC);
+			result = IRQ_HANDLED;
+			goto out;
+		}
+		result = IRQ_NONE;
+		goto out;
+	}
+	DBG("*** %s got interrupt: 0x%08x\n",
+		mmc_hostname(host->mmc), intmask);
+
+	/* acknowledge everything we are about to handle */
+	mshci_writel(host, intmask, MSHCI_RINTSTS);
+
+	if (intmask & (INTMSK_CDETECT)) {
+		if (!(host->mmc->caps & MMC_CAP_NONREMOVABLE))
+			tasklet_schedule(&host->card_tasklet);
+	}
+	intmask &= ~INTMSK_CDETECT;
+
+	if (intmask & CMD_STATUS) {
+		if (!(intmask & INTMSK_CDONE) && (intmask & INTMSK_RTO)) {
+			/*
+			 * when a error about command timeout occurs,
+			 * cmd done intr comes together.
+			 * cmd done intr comes later than error intr.
+			 * so, it has to wait for cmd done intr.
+			 */
+			while (--timeout && !(mshci_readl(host, MSHCI_MINTSTS)
+					& INTMSK_CDONE))
+				; /* Nothing to do */
+			if (!timeout)
+				printk(KERN_ERR"*** %s time out for CDONE intr\n",
+					mmc_hostname(host->mmc));
+			else
+				mshci_writel(host, INTMSK_CDONE,
+					MSHCI_RINTSTS);
+			mshci_cmd_irq(host, intmask & CMD_STATUS);
+		} else {
+			mshci_cmd_irq(host, intmask & CMD_STATUS);
+		}
+	}
+
+	if (intmask & DATA_STATUS) {
+		if (!(intmask & INTMSK_DTO) && (intmask & INTMSK_DRTO)) {
+			/*
+			 * when a error about data timout occurs,
+			 * DTO intr comes together.
+			 * DTO intr comes later than error intr.
+			 * so, it has to wait for DTO intr.
+			 */
+			while (--timeout && !(mshci_readl(host, MSHCI_MINTSTS)
+					& INTMSK_DTO))
+				; /* Nothing to do */
+			if (!timeout)
+				printk(KERN_ERR"*** %s time out for DTO intr\n",
+					mmc_hostname(host->mmc));
+			else
+				mshci_writel(host, INTMSK_DTO,
+					MSHCI_RINTSTS);
+			mshci_data_irq(host, intmask & DATA_STATUS,
+				INT_SRC_MINT);
+		} else {
+			mshci_data_irq(host, intmask & DATA_STATUS,
+				INT_SRC_MINT);
+		}
+	}
+
+	intmask &= ~(CMD_STATUS | DATA_STATUS);
+
+	/* SDIO card interrupt: signalled after the lock is dropped */
+	if (intmask & SDIO_INT_ENABLE)
+		cardint = 1;
+
+	intmask &= ~SDIO_INT_ENABLE;
+
+	if (intmask) {
+		printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n",
+			mmc_hostname(host->mmc), intmask);
+		mshci_dumpregs(host);
+	}
+
+	result = IRQ_HANDLED;
+
+	mmiowb();
+out:
+	spin_unlock(&host->lock);
+
+	/*
+	 * We have to delay this as it calls back into the driver.
+	 */
+	if (cardint)
+		mmc_signal_sdio_irq(host->mmc);
+
+	return result;
+}
+
+/*****************************************************************************\
+ * *
+ * Suspend/resume *
+ * *
+\*****************************************************************************/
+
+#ifdef CONFIG_PM
+
+/*
+ * Suspend the host: stop card-detect handling, suspend the MMC core
+ * side, then release the interrupt line (it is re-requested in
+ * mshci_resume_host()).  Returns 0 or the mmc_suspend_host() error.
+ */
+int mshci_suspend_host(struct mshci_host *host, pm_message_t state)
+{
+	int ret;
+
+	mshci_disable_card_detection(host);
+
+	ret = mmc_suspend_host(host->mmc);
+	if (ret)
+		return ret;
+
+	free_irq(host->irq, host);
+
+	return 0;
+}
+
+/*
+ * Resume the host: re-enable platform DMA, reinitialize the controller
+ * (mirroring the setup done in mshci_add_host()), re-request the IRQ,
+ * restore FIFO/debounce/bus-mode settings, and finally resume the MMC
+ * core side and card detection.  Returns 0 or a negative errno.
+ */
+int mshci_resume_host(struct mshci_host *host)
+{
+	int ret;
+	int count;
+
+	if (host->flags & (MSHCI_USE_IDMA)) {
+		if (host->ops->enable_dma)
+			host->ops->enable_dma(host);
+	}
+
+	mshci_init(host);
+
+	ret = request_irq(host->irq, mshci_irq, IRQF_SHARED,
+		mmc_hostname(host->mmc), host);
+	if (ret)
+		return ret;
+
+	mmiowb();
+
+	mshci_fifo_init(host);
+
+	/* set debounce filter value */
+	mshci_writel(host, 0xfffff, MSHCI_DEBNCE);
+
+	/* clear card type. set 1bit mode */
+	mshci_writel(host, 0x0, MSHCI_CTYPE);
+
+	/* set bus mode register for IDMAC */
+	if (host->flags & MSHCI_USE_IDMA) {
+		mshci_writel(host, BMOD_IDMAC_RESET, MSHCI_BMOD);
+		count = 100;
+		/* wait (bounded) for the IDMAC reset bit to self-clear */
+		while ((mshci_readl(host, MSHCI_BMOD) & BMOD_IDMAC_RESET)
+				&& --count)
+			; /* nothing to do */
+
+		mshci_writel(host, (mshci_readl(host, MSHCI_BMOD) |
+			(BMOD_IDMAC_ENABLE|BMOD_IDMAC_FB)), MSHCI_BMOD);
+	}
+
+	ret = mmc_resume_host(host->mmc);
+	if (ret)
+		return ret;
+
+	mshci_enable_card_detection(host);
+
+	return 0;
+}
+
+#endif /* CONFIG_PM */
+
+/*****************************************************************************\
+ * *
+ * Device allocation/registration *
+ * *
+\*****************************************************************************/
+
+/*
+ * Allocate an mmc_host with room for a mshci_host plus priv_size bytes
+ * of caller-private data.  Returns the embedded mshci_host, or
+ * ERR_PTR(-ENOMEM) on allocation failure.
+ */
+struct mshci_host *mshci_alloc_host(struct device *dev,
+	size_t priv_size)
+{
+	struct mmc_host *mmc;
+	struct mshci_host *host;
+
+	WARN_ON(dev == NULL);
+
+	mmc = mmc_alloc_host(sizeof(struct mshci_host) + priv_size, dev);
+	if (!mmc)
+		return ERR_PTR(-ENOMEM);
+
+	host = mmc_priv(mmc);
+	host->mmc = mmc;
+
+	return host;
+}
+
+/*
+ * Program the FIFO threshold register: the TX watermark is set to half
+ * the hardware FIFO depth, the RX watermark to (threshold - 1), and the
+ * DMA burst size (MSIZE) to the largest value the threshold allows.
+ * host->fifo_depth is rounded down to 2 * threshold.
+ *
+ * NOTE(review): the log line prints fifo_depth under the "RX" label and
+ * fifo_threshold under "WX" -- the labels look swapped; confirm.
+ */
+static void mshci_fifo_init(struct mshci_host *host)
+{
+	int fifo_val, fifo_depth, fifo_threshold;
+
+	fifo_val = mshci_readl(host, MSHCI_FIFOTH);
+	fifo_depth = host->ops->get_fifo_depth(host);
+	fifo_threshold = fifo_depth/2;
+	host->fifo_threshold = fifo_threshold;
+	host->fifo_depth = fifo_threshold*2;
+
+	printk(KERN_INFO "%s: FIFO WMARK FOR RX 0x%x WX 0x%x. ###########\n",
+		mmc_hostname(host->mmc), fifo_depth,
+		fifo_threshold);
+
+	fifo_val &= ~(RX_WMARK | TX_WMARK | MSIZE_MASK);
+
+	/* TX watermark in the low half, RX watermark at bits 16+ */
+	fifo_val |= (fifo_threshold | ((fifo_threshold-1)<<16));
+	if (fifo_threshold >= 0x40)
+		fifo_val |= MSIZE_64;
+	else if (fifo_threshold >= 0x20)
+		fifo_val |= MSIZE_32;
+	else if (fifo_threshold >= 0x10)
+		fifo_val |= MSIZE_16;
+	else if (fifo_threshold >= 0x8)
+		fifo_val |= MSIZE_8;
+	else
+		fifo_val |= MSIZE_1;
+
+	mshci_writel(host, fifo_val, MSHCI_FIFOTH);
+}
+EXPORT_SYMBOL_GPL(mshci_alloc_host);
+
+/*
+ * Register a MSHCI host with the MMC core: reset the controller,
+ * allocate IDMA descriptors, set host capabilities/limits, set up the
+ * tasklets, watchdog timer and interrupt handler, program the FIFO and
+ * bus-mode registers, and finally add the mmc host.
+ *
+ * Returns 0 on success or a negative errno.
+ *
+ * Cleanups vs. the original: removed a kfree() of a pointer that was
+ * known to be NULL, a dead "max_clk = 0; if (max_clk == 0)" sequence,
+ * a provably-dead ocr_avail check, and duplicate assignments of
+ * max_segs and max_blk_size.  Behavior is unchanged.
+ */
+int mshci_add_host(struct mshci_host *host)
+{
+	struct mmc_host *mmc;
+	int ret, count;
+
+	WARN_ON(host == NULL);
+	if (host == NULL)
+		return -EINVAL;
+
+	mmc = host->mmc;
+
+	if (debug_quirks)
+		host->quirks = debug_quirks;
+
+	mshci_reset_all(host);
+
+	host->version = mshci_readl(host, MSHCI_VERID);
+
+	/* there are no reasons not to use DMA */
+	host->flags |= MSHCI_USE_IDMA;
+
+	if (host->flags & MSHCI_USE_IDMA) {
+		/* We need to allocate descriptors for all sg entries:
+		 * MSHCI_MAX_DMA_LIST transfers for each of those entries. */
+		host->idma_desc = kmalloc(MSHCI_MAX_DMA_LIST *
+				sizeof(struct mshci_idmac), GFP_KERNEL);
+		if (!host->idma_desc) {
+			printk(KERN_WARNING "%s: Unable to allocate IDMA "
+				"buffers. Falling back to standard DMA.\n",
+				mmc_hostname(mmc));
+			host->flags &= ~MSHCI_USE_IDMA;
+		}
+	}
+
+	/*
+	 * If we use DMA, then it's up to the caller to set the DMA
+	 * mask, but PIO does not need the hw shim so we set a new
+	 * mask here in that case.
+	 */
+	if (!(host->flags & (MSHCI_USE_IDMA))) {
+		host->dma_mask = DMA_BIT_MASK(64);
+		mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
+	}
+
+	printk(KERN_ERR "%s: Version ID 0x%x.\n",
+		mmc_hostname(host->mmc), host->version);
+
+	/* the base clock frequency must come from the platform callback */
+	if (!host->ops->get_max_clock) {
+		printk(KERN_ERR
+			"%s: Hardware doesn't specify base clock "
+			"frequency.\n", mmc_hostname(mmc));
+		return -ENODEV;
+	}
+	host->max_clk = host->ops->get_max_clock(host);
+
+	/*
+	 * Set host parameters.
+	 */
+	if (host->ops->get_ro)
+		mshci_ops.get_ro = host->ops->get_ro;
+
+	mmc->ops = &mshci_ops;
+	mmc->f_min = 400000;
+	mmc->f_max = host->max_clk;
+	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE;
+	mmc->caps |= MMC_CAP_4_BIT_DATA;
+
+	/* supported voltage ranges: 2.9V - 3.4V */
+	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 |
+			MMC_VDD_29_30 | MMC_VDD_30_31;
+
+	spin_lock_init(&host->lock);
+
+	/*
+	 * Maximum number of segments (the same limit applies for both
+	 * IDMA and PIO in this driver).
+	 */
+	mmc->max_segs = MSHCI_MAX_DMA_LIST;
+
+	/* to prevent starvation of a process that wants to access the SD
+	 * device, limit the size transferred at one time. */
+	mmc->max_req_size = MSHCI_MAX_DMA_TRANS_SIZE;
+
+	/*
+	 * Maximum segment size. Could be one segment with the maximum
+	 * number of bytes. When doing hardware scatter/gather, each entry
+	 * cannot be larger than 4 KiB though.
+	 */
+	if (host->flags & MSHCI_USE_IDMA)
+		mmc->max_seg_size = 0x1000;
+	else
+		mmc->max_seg_size = mmc->max_req_size;
+
+	/* from SD spec 2.0 and MMC spec 4.2, the block size is fixed
+	 * to 512 bytes */
+	mmc->max_blk_size = 512;
+
+	/*
+	 * Maximum block count.
+	 */
+	mmc->max_blk_count = MSHCI_MAX_DMA_TRANS_SIZE / mmc->max_blk_size;
+
+	/*
+	 * Init tasklets.
+	 */
+	tasklet_init(&host->card_tasklet,
+		mshci_tasklet_card, (unsigned long)host);
+	tasklet_init(&host->finish_tasklet,
+		mshci_tasklet_finish, (unsigned long)host);
+
+	setup_timer(&host->timer, mshci_timeout_timer, (unsigned long)host);
+
+	ret = request_irq(host->irq, mshci_irq, IRQF_SHARED,
+		mmc_hostname(mmc), host);
+	if (ret)
+		goto untasklet;
+
+	mshci_init(host);
+
+	/* global interrupt enable */
+	mshci_writel(host, (mshci_readl(host, MSHCI_CTRL) | INT_ENABLE),
+		MSHCI_CTRL);
+
+	mshci_fifo_init(host);
+
+	/* set debounce filter value */
+	mshci_writel(host, 0xfffff, MSHCI_DEBNCE);
+
+	/* clear card type. set 1bit mode */
+	mshci_writel(host, 0x0, MSHCI_CTYPE);
+
+	/* set bus mode register for IDMAC */
+	if (host->flags & MSHCI_USE_IDMA) {
+		mshci_writel(host, BMOD_IDMAC_RESET, MSHCI_BMOD);
+		count = 100;
+		/* wait (bounded) for the IDMAC reset bit to self-clear */
+		while ((mshci_readl(host, MSHCI_BMOD) & BMOD_IDMAC_RESET)
+				&& --count)
+			; /* nothing to do */
+
+		mshci_writel(host, (mshci_readl(host, MSHCI_BMOD) |
+			(BMOD_IDMAC_ENABLE|BMOD_IDMAC_FB)), MSHCI_BMOD);
+	}
+#ifdef CONFIG_MMC_DEBUG
+	mshci_dumpregs(host);
+#endif
+
+	mmiowb();
+
+	mmc_add_host(mmc);
+
+	printk(KERN_INFO "%s: MSHCI controller on %s [%s] using %s\n",
+		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
+		(host->flags & MSHCI_USE_IDMA) ? "IDMA" : "PIO");
+
+	mshci_enable_card_detection(host);
+
+	return 0;
+
+untasklet:
+	tasklet_kill(&host->card_tasklet);
+	tasklet_kill(&host->finish_tasklet);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(mshci_add_host);
+EXPORT_SYMBOL_GPL(mshci_add_host);
+
+/*
+ * Unregister the host.  When dead != 0 the hardware is assumed gone:
+ * any in-flight request is failed with -ENOMEDIUM and no final
+ * controller reset is attempted.  Frees the IRQ, timer, tasklets and
+ * the IDMA descriptor buffer.
+ */
+void mshci_remove_host(struct mshci_host *host, int dead)
+{
+	if (dead) {
+		spin_lock_irqsave(&host->lock, host->sl_flags);
+
+		host->flags |= MSHCI_DEVICE_DEAD;
+
+		if (host->mrq) {
+			printk(KERN_ERR "%s: Controller removed during "
+				" transfer!\n", mmc_hostname(host->mmc));
+
+			host->mrq->cmd->error = -ENOMEDIUM;
+			tasklet_schedule(&host->finish_tasklet);
+		}
+
+		spin_unlock_irqrestore(&host->lock, host->sl_flags);
+	}
+
+	mshci_disable_card_detection(host);
+
+	mmc_remove_host(host->mmc);
+
+	/* only touch the hardware if it is still present */
+	if (!dead)
+		mshci_reset_all(host);
+
+	free_irq(host->irq, host);
+
+	del_timer_sync(&host->timer);
+
+	tasklet_kill(&host->card_tasklet);
+	tasklet_kill(&host->finish_tasklet);
+
+	kfree(host->idma_desc);
+
+	host->idma_desc = NULL;
+	host->align_buffer = NULL;
+}
+
+/* Release the mmc_host allocated by mshci_alloc_host(). */
+void mshci_free_host(struct mshci_host *host)
+{
+	mmc_free_host(host->mmc);
+}
+
+/*****************************************************************************\
+ * *
+ * Driver init/exit *
+ * *
+\*****************************************************************************/
+
+/* Module init: nothing to set up globally, just announce the driver. */
+static int __init mshci_drv_init(void)
+{
+	printk(KERN_INFO DRIVER_NAME
+		": Mobile Storage Host Controller Interface driver\n");
+	printk(KERN_INFO DRIVER_NAME
+		": Copyright (c) 2011 Samsung Electronics Co., Ltd\n");
+
+	return 0;
+}
+
+/* Module exit: no global state to tear down; hosts clean up per-device. */
+static void __exit mshci_drv_exit(void)
+{
+}
+
+module_init(mshci_drv_init);
+module_exit(mshci_drv_exit);
+
+module_param(debug_quirks, uint, 0444);
+
+MODULE_AUTHOR("Hyunsung Jang, <hs79.jang@samsung.com>");
+MODULE_DESCRIPTION("Mobile Storage Host Controller Interface core driver");
+MODULE_LICENSE("GPL");
+
+MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
diff --git a/drivers/mmc/host/mshci.h b/drivers/mmc/host/mshci.h
new file mode 100644
index 0000000..40212ae
--- /dev/null
+++ b/drivers/mmc/host/mshci.h
@@ -0,0 +1,463 @@
+/*
+* linux/drivers/mmc/host/mshci.h
+* Mobile Storage Host Controller Interface driver
+*
+* Copyright (c) 2011 Samsung Electronics Co., Ltd.
+* http://www.samsung.com
+*
+* Based on linux/drivers/mmc/host/sdhci.h
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or (at
+* your option) any later version.
+*
+*/
+
+#include <linux/scatterlist.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/io.h>
+
+/*
+ * Controller registers
+ */
+/*****************************************************/
+/* MSHC Internal Registers */
+/*****************************************************/
+
+#define MSHCI_CTRL 0x00 /* Control */
+#define MSHCI_PWREN 0x04 /* Power-enable */
+#define MSHCI_CLKDIV 0x08 /* Clock divider */
+#define MSHCI_CLKSRC 0x0C /* Clock source */
+#define MSHCI_CLKENA 0x10 /* Clock enable */
+#define MSHCI_TMOUT 0x14 /* Timeout */
+#define MSHCI_CTYPE 0x18 /* Card type */
+#define MSHCI_BLKSIZ 0x1C /* Block Size */
+#define MSHCI_BYTCNT 0x20 /* Byte count */
+#define MSHCI_INTMSK 0x24 /* Interrupt Mask */
+#define MSHCI_CMDARG 0x28 /* Command Argument */
+#define MSHCI_CMD 0x2C /* Command */
+#define MSHCI_RESP0 0x30 /* Response 0 */
+#define MSHCI_RESP1 0x34 /* Response 1 */
+#define MSHCI_RESP2 0x38 /* Response 2 */
+#define MSHCI_RESP3 0x3C /* Response 3 */
+#define MSHCI_MINTSTS 0x40 /* Masked interrupt status */
+#define MSHCI_RINTSTS 0x44 /* Raw interrupt status */
+#define MSHCI_STATUS 0x48 /* Status */
+#define MSHCI_FIFOTH 0x4C /* FIFO threshold */
+#define MSHCI_CDETECT 0x50 /* Card detect */
+#define MSHCI_WRTPRT 0x54 /* Write protect */
+#define MSHCI_GPIO 0x58 /* General Purpose IO */
+#define MSHCI_TCBCNT 0x5C /* Transferred CIU byte count */
+#define MSHCI_TBBCNT 0x60 /* Transferred host/DMA to/from byte count */
+#define MSHCI_DEBNCE 0x64 /* Card detect debounce */
+#define MSHCI_USRID 0x68 /* User ID */
+#define MSHCI_VERID 0x6C /* Version ID */
+#define MSHCI_HCON 0x70 /* Hardware Configuration */
+#define MSHCI_UHS_REG 0x74 /* UHS and DDR setting */
+#define MSHCI_BMOD 0x80 /* Bus mode register */
+#define MSHCI_PLDMND 0x84 /* Poll demand */
+#define MSHCI_DBADDR 0x88 /* Descriptor list base address */
+#define MSHCI_IDSTS 0x8C /* Internal DMAC status */
+#define MSHCI_IDINTEN 0x90 /* Internal DMAC interrupt enable */
+#define MSHCI_DSCADDR 0x94 /* Current host descriptor address */
+#define MSHCI_BUFADDR 0x98 /* Current host buffer address */
+#define MSHCI_CLKSEL 0x9C /* Clock Selection Register */
+#define MSHCI_WAKEUPCON 0xA0 /* Wakeup control register */
+#define MSHCI_CLOCKCON 0xA4 /* Clock (delay) control register */
+#define MSHCI_FIFODAT 0x100 /* FIFO data read write */
+
+/*****************************************************
+ * Control Register Register
+ * MSHCI_CTRL - offset 0x00
+ *****************************************************/
+
+#define CTRL_RESET (0x1<<0) /* Reset DWC_mobile_storage controller */
+#define FIFO_RESET (0x1<<1) /* Reset FIFO */
+#define DMA_RESET (0x1<<2) /* Reset DMA interface */
+#define INT_ENABLE (0x1<<4) /* Global interrupt enable/disable bit */
+#define DMA_ENABLE (0x1<<5) /* DMA transfer mode enable/disable bit */
+#define READ_WAIT (0x1<<6) /* For sending read-wait to SDIO cards */
+#define SEND_IRQ_RESP (0x1<<7) /* Send auto IRQ response */
+#define ABRT_READ_DATA (0x1<<8)
+#define SEND_CCSD (0x1<<9)
+#define SEND_AS_CCSD (0x1<<10)
+#define CEATA_INTSTAT (0x1<<11)
+#define CARD_VOLA (0xF<<16)
+#define CARD_VOLB (0xF<<20)
+#define ENABLE_OD_PULLUP (0x1<<24)
+#define ENABLE_IDMAC (0x1<<25)
+
+#define MSHCI_RESET_ALL (0x1)
+
+/*****************************************************
+ * Power Enable Register
+ * MSHCI_PWREN - offset 0x04
+ *****************************************************/
+#define POWER_ENABLE (0x1<<0)
+
+/*****************************************************
+ * Clock Divider Register
+ * MSHCI_CLKDIV - offset 0x08
+ *****************************************************/
+#define CLK_DIVIDER0 (0xFF<<0)
+#define CLK_DIVIDER1 (0xFF<<8)
+#define CLK_DIVIDER2 (0xFF<<16)
+#define CLK_DIVIDER3 (0xFF<<24)
+
+/*****************************************************
+ * Clock Enable Register
+ * MSHCI_CLKENA - offset 0x10
+ *****************************************************/
+/* 48 MHz ceiling -- the original note said "96Mhz" but the value below
+ * is 48000000; confirm which was intended. it SHOULD BE optimized */
+#define CLK_SDMMC_MAX (48000000)
+#define CLK_ENABLE (0x1<<0)
+#define CLK_DISABLE (0x0<<0)
+
+/*****************************************************
+ * Timeout Register
+ * MSHCI_TMOUT - offset 0x14
+ *****************************************************/
+#define RSP_TIMEOUT (0xFF<<0)
+#define DATA_TIMEOUT (0xFFFFFF<<8)
+
+/*****************************************************
+ * Card Type Register
+ * MSHCI_CTYPE - offset 0x18
+ *****************************************************/
+#define CARD_WIDTH4 (0xFFFF<<0)
+#define CARD_WIDTH8 (0xFFFF<<16)
+
+/*****************************************************
+ * Block Size Register
+ * MSHCI_BLKSIZ - offset 0x1C
+ *****************************************************/
+#define BLK_SIZ (0xFFFF<<0)
+
+/*****************************************************
+ * Interrupt Mask Register
+ * MSHCI_INTMSK - offset 0x24
+ *****************************************************/
+#define INT_MASK (0xFFFF<<0)
+#define SDIO_INT_MASK (0xFFFF<<16)
+#define SDIO_INT_ENABLE (0x1<<16)
+
+/* interrupt bits */
+#define INTMSK_ALL 0xFFFFFFFF
+#define INTMSK_CDETECT (0x1<<0)
+#define INTMSK_RE (0x1<<1)
+#define INTMSK_CDONE (0x1<<2)
+#define INTMSK_DTO (0x1<<3)
+#define INTMSK_TXDR (0x1<<4)
+#define INTMSK_RXDR (0x1<<5)
+#define INTMSK_RCRC (0x1<<6)
+#define INTMSK_DCRC (0x1<<7)
+#define INTMSK_RTO (0x1<<8)
+#define INTMSK_DRTO (0x1<<9)
+#define INTMSK_HTO (0x1<<10)
+#define INTMSK_FRUN (0x1<<11)
+#define INTMSK_HLE (0x1<<12)
+#define INTMSK_SBE (0x1<<13)
+#define INTMSK_ACD (0x1<<14)
+#define INTMSK_EBE (0x1<<15)
+#define INTMSK_DMA (INTMSK_ACD|INTMSK_RXDR|INTMSK_TXDR)
+
+#define INT_SRC_IDMAC (0x0)
+#define INT_SRC_MINT (0x1)
+
+
+/*****************************************************
+ * Command Register
+ * MSHCI_CMD - offset 0x2C
+ *****************************************************/
+
+/* Bit fields of the MSHCI_CMD register (offset 0x2C). */
+#define CMD_RESP_EXP_BIT (0x1<<6)
+#define CMD_RESP_LENGTH_BIT (0x1<<7)
+#define CMD_CHECK_CRC_BIT (0x1<<8)
+#define CMD_DATA_EXP_BIT (0x1<<9)
+#define CMD_RW_BIT (0x1<<10)
+#define CMD_TRANSMODE_BIT (0x1<<11)
+#define CMD_SENT_AUTO_STOP_BIT (0x1<<12)
+#define CMD_WAIT_PRV_DAT_BIT (0x1<<13)
+#define CMD_ABRT_CMD_BIT (0x1<<14)
+#define CMD_SEND_INIT_BIT (0x1<<15)
+#define CMD_CARD_NUM_BITS (0x1F<<16)
+#define CMD_SEND_CLK_ONLY (0x1<<21)
+#define CMD_READ_CEATA (0x1<<22)
+#define CMD_CCS_EXPECTED (0x1<<23)
+#define CMD_USE_HOLD_REG (0x1<<29)
+/* unsigned constant: left-shifting 1 into bit 31 of a signed int is
+ * undefined behaviour in C; the bit pattern is unchanged */
+#define CMD_STRT_BIT (0x1U<<31)
+#define CMD_ONLY_CLK (CMD_STRT_BIT | CMD_SEND_CLK_ONLY | \
+ CMD_WAIT_PRV_DAT_BIT)
+
+/*****************************************************
+ * Masked Interrupt Status Register
+ * MSHCI_MINTSTS - offset 0x40
+ *****************************************************/
+/*****************************************************
+ * Raw Interrupt Register
+ * MSHCI_RINTSTS - offset 0x44
+ *****************************************************/
+#define INT_STATUS (0xFFFF<<0)
+#define SDIO_INTR (0xFFFF<<16)
+/* dropped the duplicated INTMSK_EBE from the original definition --
+ * OR-ing the same bit twice is a no-op, so the value is unchanged */
+#define DATA_ERR (INTMSK_EBE|INTMSK_SBE|INTMSK_HLE|INTMSK_FRUN|\
+ INTMSK_DCRC)
+#define DATA_TOUT (INTMSK_HTO|INTMSK_DRTO)
+#define DATA_STATUS (DATA_ERR|DATA_TOUT|INTMSK_RXDR|INTMSK_TXDR|INTMSK_DTO)
+#define CMD_STATUS (INTMSK_RTO|INTMSK_RCRC|INTMSK_CDONE|INTMSK_RE)
+#define CMD_ERROR (INTMSK_RCRC|INTMSK_RTO|INTMSK_RE)
+
+/*****************************************************
+ * Status Register
+ * MSHCI_STATUS - offset 0x48
+ *****************************************************/
+#define FIFO_RXWTRMARK (0x1<<0)
+#define FIFO_TXWTRMARK (0x1<<1)
+#define FIFO_EMPTY (0x1<<2)
+#define FIFO_FULL (0x1<<3)
+#define CMD_FSMSTAT (0xF<<4)
+#define DATA_3STATUS (0x1<<8)
+#define DATA_BUSY (0x1<<9)
+#define DATA_MCBUSY (0x1<<10)
+#define RSP_INDEX (0x3F<<11)
+#define FIFO_COUNT (0x1FFF<<17)
+#define DMA_ACK (0x1<<30)
+/* unsigned to avoid shifting into the sign bit of a signed int (UB) */
+#define DMA_REQ (0x1U<<31)
+#define FIFO_WIDTH (0x4)
+#define FIFO_DEPTH (0x20)
+
+/*Command FSM status */
+#define FSM_IDLE (0<<4)
+#define FSM_SEND_INIT_SEQ (1<<4)
+#define FSM_TX_CMD_STARTBIT (2<<4)
+#define FSM_TX_CMD_TXBIT (3<<4)
+#define FSM_TX_CMD_INDEX_ARG (4<<4)
+#define FSM_TX_CMD_CRC7 (5<<4)
+#define FSM_TX_CMD_ENDBIT (6<<4)
+#define FSM_RX_RESP_STARTBIT (7<<4)
+#define FSM_RX_RESP_IRQRESP (8<<4)
+#define FSM_RX_RESP_TXBIT (9<<4)
+#define FSM_RX_RESP_CMDIDX (10<<4)
+#define FSM_RX_RESP_DATA (11<<4)
+#define FSM_RX_RESP_CRC7 (12<<4)
+#define FSM_RX_RESP_ENDBIT (13<<4)
+#define FSM_CMD_PATHWAITNCC (14<<4)
+#define FSM_WAIT (15<<4)
+
+/*****************************************************
+ * FIFO Threshold Watermark Register
+ * MSHCI_FIFOTH - offset 0x4C
+ *****************************************************/
+#define TX_WMARK (0xFFF<<0)
+#define RX_WMARK (0xFFF<<16)
+#define MSIZE_MASK (0x7<<28)
+
+/* DW DMA Mutiple Transaction Size */
+#define MSIZE_1 (0<<28)
+#define MSIZE_4 (1<<28)
+#define MSIZE_8 (2<<28)
+#define MSIZE_16 (3<<28)
+#define MSIZE_32 (4<<28)
+#define MSIZE_64 (5<<28)
+#define MSIZE_128 (6<<28)
+#define MSIZE_256 (7<<28)
+
+/*****************************************************
+ * FIFO Threshold Watermark Register
+ * MSHCI_FIFOTH - offset 0x4C
+ *****************************************************/
+#define GPI (0xFF<<0)
+#define GPO (0xFFFF<<8)
+
+
+/*****************************************************
+ * Card Detect Register
+ * MSHCI_CDETECT - offset 0x50
+ * It assumes there is only one SD slot
+ *****************************************************/
+#define CARD_PRESENT (0x1<<0)
+
+/*****************************************************
+ * Write Protect Register
+ * MSHCI_WRTPRT - offset 0x54
+ * It assumes there is only one SD slot
+ *****************************************************/
+#define WRTPRT_ON (0x1<<0)
+
+/*****************************************************
+ * Bus Mode Register
+ * MSHCI_BMOD - offset 0x80
+ *****************************************************/
+#define BMOD_IDMAC_RESET (0x1<<0)
+#define BMOD_IDMAC_FB (0x1<<1)
+#define BMOD_IDMAC_ENABLE (0x1<<7)
+
+/*****************************************************
+ * Hardware Configuration Register
+ * MSHCI_HCON - offset 0x70
+ *****************************************************/
+#define CARD_TYPE (0x1<<0)
+#define NUM_CARDS (0x1F<<1)
+#define H_BUS_TYPE (0x1<<6)
+#define H_DATA_WIDTH (0x7<<7)
+#define H_ADDR_WIDTH (0x3F<<10)
+#define DMA_INTERFACE (0x3<<16)
+#define GE_DMA_DATA_WIDTH (0x7<<18)
+#define FIFO_RAM_INSIDE (0x1<<21)
+/* NOTE(review): looks like a typo of "IMPLEMENT_HOLD_REG"; kept as-is
+ * because other files in this patch may reference the name */
+#define UMPLEMENT_HOLD_REG (0x1<<22)
+#define SET_CLK_FALSE_PATH (0x1<<23)
+#define NUM_CLK_DIVIDER (0x3<<24)
+
+/*****************************************************
+ * Hardware Configuration Register
+ * MSHCI_IDSTS - offset 0x8c
+ *****************************************************/
+#define IDSTS_FSM (0xf<<13)
+#define IDSTS_EB (0x7<<10)
+#define IDSTS_AIS (0x1<<9)
+#define IDSTS_NIS (0x1<<8)
+#define IDSTS_CES (0x1<<5)
+#define IDSTS_DU (0x1<<4)
+#define IDSTS_FBE (0x1<<2)
+#define IDSTS_RI (0x1<<1)
+#define IDSTS_TI (0x1<<0)
+
+struct mshci_ops;
+
+/* One internal-DMA-controller (IDMAC) descriptor; the four-word layout
+ * is consumed by the hardware via MSHCI_DBADDR / MSHCI_DSCADDR. */
+struct mshci_idmac {
+ u32 des0;
+ u32 des1;
+ u32 des2;
+ u32 des3;
+/* unsigned so bit 31 (descriptor OWN flag) is formed without the
+ * signed-shift undefined behaviour of (1<<31) */
+#define MSHCI_IDMAC_OWN (1U<<31)
+#define MSHCI_IDMAC_ER (1<<5)
+#define MSHCI_IDMAC_CH (1<<4)
+#define MSHCI_IDMAC_FS (1<<3)
+#define MSHCI_IDMAC_LD (1<<2)
+#define MSHCI_IDMAC_DIC (1<<1)
+#define INTMSK_IDMAC_ALL (0x337)
+#define INTMSK_IDMAC_ERROR (0x214)
+};
+
+/* Per-controller state shared between the MSHC core (mshci.c) and the
+ * platform glue (mshci-s3c.c). Derived from sdhci_host; some comments
+ * below were inherited from sdhci and corrected/hedged where stale. */
+struct mshci_host {
+ /* Data set by hardware interface driver */
+ const char *hw_name; /* Hardware bus name */
+
+ unsigned int quirks; /* Deviations from spec. */
+/* Controller has no write-protect pin connected with SD card */
+#define MSHCI_QUIRK_NO_WP_BIT (1<<0)
+#define MSHCI_QUIRK_BROKEN_CARD_DETECTION (1<<1)
+#define MSHCI_QUIRK_BROKEN_PRESENT_BIT (1<<2)
+
+ int irq; /* Device IRQ */
+ void __iomem *ioaddr; /* Mapped address */
+
+ const struct mshci_ops *ops; /* Low level hw interface */
+
+ /* Internal data */
+ struct mmc_host *mmc; /* MMC structure */
+ u64 dma_mask; /* custom DMA mask */
+
+ spinlock_t lock; /* Mutex */
+
+ int flags; /* Host attributes */
+#define MSHCI_USE_IDMA (1<<1) /* Host is IDMA capable */
+#define MSHCI_REQ_USE_DMA (1<<2) /* Use DMA for this req. */
+#define MSHCI_DEVICE_DEAD (1<<3) /* Device unresponsive */
+
+ unsigned int version; /* spec. version (comment inherited from sdhci) */
+
+/* NOTE(review): the (MHz)/(KHz) unit notes below are inherited from
+ * sdhci and look suspect -- clock code usually tracks Hz; confirm. */
+ unsigned int max_clk; /* Max possible freq (MHz) */
+ unsigned int timeout_clk; /* Timeout freq (KHz) */
+
+ unsigned int clock; /* Current clock (MHz) */
+ unsigned int clock_to_restore; /* Saved clock for dynamic clock gating (MHz) */
+ u8 pwr; /* Current voltage */
+
+ struct mmc_request *mrq; /* Current request */
+ struct mmc_command *cmd; /* Current command */
+ struct mmc_data *data; /* Current data request */
+ unsigned int data_early:1; /* Data finished before cmd */
+
+ struct sg_mapping_iter sg_miter; /* SG state for PIO */
+ unsigned int blocks; /* remaining PIO blocks */
+
+ int sg_count; /* Mapped sg entries */
+
+ u8 *idma_desc; /* IDMA descriptor table */
+ u8 *align_buffer; /* Bounce buffer */
+
+ dma_addr_t idma_addr; /* Mapped IDMA descr. table */
+ dma_addr_t align_addr; /* Mapped bounce buffer */
+
+ struct tasklet_struct card_tasklet; /* Tasklet structures */
+ struct tasklet_struct finish_tasklet;
+
+ struct timer_list timer; /* Timer for timeouts */
+
+ u32 fifo_depth;
+ u32 fifo_threshold;
+ u32 data_transfered;
+
+ /* IP version control */
+ u32 data_addr;
+ u32 hold_bit;
+
+ u32 error_state;
+
+ /* saved IRQ flags for spin_lock_irqsave (see mshci_remove_host) */
+ unsigned long sl_flags;
+ /* start of the per-platform private area returned by mshci_priv();
+ * sized by the priv_size argument of mshci_alloc_host() */
+ unsigned long private[0] ____cacheline_aligned;
+};
+
+/* Low-level hardware callbacks supplied by the platform glue (e.g.
+ * mshci-s3c). Optional entries may be NULL; the core checks before
+ * calling. */
+struct mshci_ops {
+ void (*set_clock)(struct mshci_host *host, unsigned int clock);
+
+ int (*enable_dma)(struct mshci_host *host);
+ unsigned int (*get_max_clock)(struct mshci_host *host);
+ unsigned int (*get_min_clock)(struct mshci_host *host);
+ unsigned int (*get_timeout_clock)(struct mshci_host *host);
+ void (*set_ios)(struct mshci_host *host,
+ struct mmc_ios *ios);
+ int (*get_ro) (struct mmc_host *mmc);
+ void (*init_issue_cmd)(struct mshci_host *host);
+ void (*init_card)(struct mshci_host *host);
+
+ /* platform scatter-gather mapping hooks; flush_type semantics are
+ * platform-defined -- see the implementation in the glue driver */
+ int (*dma_map_sg)(struct mshci_host *host,
+ struct device *dev,
+ struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ int flush_type);
+ void (*dma_unmap_sg)(struct mshci_host *host,
+ struct device *dev,
+ struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ int flush_type);
+ int (*get_fifo_depth)(struct mshci_host *host);
+};
+
+/* 32-bit MMIO register write.
+ * NOTE(review): uses __raw_writel (no barrier / byte-swap) while
+ * mshci_readl() uses readl -- the asymmetry may be deliberate for this
+ * SoC but is worth confirming. */
+static inline void mshci_writel(struct mshci_host *host, u32 val, int reg)
+{
+ __raw_writel(val, host->ioaddr + reg);
+}
+
+/* 32-bit MMIO register read (readl, i.e. with ordering semantics --
+ * unlike the __raw_writel used on the write side). */
+static inline u32 mshci_readl(struct mshci_host *host, int reg)
+{
+ return readl(host->ioaddr + reg);
+}
+
+extern struct mshci_host *mshci_alloc_host(struct device *dev,
+ size_t priv_size);
+extern void mshci_free_host(struct mshci_host *host);
+
+/* Return the per-platform private area trailing struct mshci_host,
+ * sized by the priv_size argument of mshci_alloc_host(). */
+static inline void *mshci_priv(struct mshci_host *host)
+{
+ return (void *)host->private;
+}
+
+extern int mshci_add_host(struct mshci_host *host);
+extern void mshci_remove_host(struct mshci_host *host, int dead);
+
+#ifdef CONFIG_PM
+extern int mshci_suspend_host(struct mshci_host *host, pm_message_t state);
+extern int mshci_resume_host(struct mshci_host *host);
+#endif
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 8cd999f..5a81b34 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -24,6 +24,7 @@
#include <plat/sdhci.h>
#include <plat/regs-sdhci.h>
+#include <plat/gpio-cfg.h>
#include "sdhci.h"
@@ -47,6 +48,7 @@ struct sdhci_s3c {
unsigned int cur_clk;
int ext_cd_irq;
int ext_cd_gpio;
+ int ext_cd_gpio_invert;
struct clk *clk_io;
struct clk *clk_bus[MAX_BUS_CLK];
@@ -212,6 +214,12 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)
if (ourhost->pdata->cfg_card)
(ourhost->pdata->cfg_card)(ourhost->pdev, host->ioaddr,
&ios, NULL);
+#ifdef CONFIG_MACH_MIDAS
+ /* call cfg_gpio with 4bit data bus */
+ if (ourhost->pdata->cfg_gpio)
+ ourhost->pdata->cfg_gpio(ourhost->pdev, 4);
+
+#endif
}
}
@@ -288,6 +296,7 @@ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock)
static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
{
u8 ctrl;
+ struct sdhci_s3c *ourhost = to_s3c(host);
ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
@@ -295,14 +304,23 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
case MMC_BUS_WIDTH_8:
ctrl |= SDHCI_CTRL_8BITBUS;
ctrl &= ~SDHCI_CTRL_4BITBUS;
+ /* call cfg_gpio with 8bit data bus */
+ if (ourhost->pdata->cfg_gpio)
+ ourhost->pdata->cfg_gpio(ourhost->pdev, 8);
break;
case MMC_BUS_WIDTH_4:
ctrl |= SDHCI_CTRL_4BITBUS;
ctrl &= ~SDHCI_CTRL_8BITBUS;
+ /* call cfg_gpio with 4bit data bus */
+ if (ourhost->pdata->cfg_gpio)
+ ourhost->pdata->cfg_gpio(ourhost->pdev, 4);
break;
default:
- ctrl &= ~SDHCI_CTRL_4BITBUS;
ctrl &= ~SDHCI_CTRL_8BITBUS;
+ ctrl &= ~SDHCI_CTRL_4BITBUS;
+ /* call cfg_gpio with 1bit data bus */
+ if (ourhost->pdata->cfg_gpio)
+ ourhost->pdata->cfg_gpio(ourhost->pdev, 1);
break;
}
@@ -311,11 +329,49 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width)
return 0;
}
+#ifdef CONFIG_MIDAS_COMMON
+/* midas board control the vdd for tflash by gpio,
+ not regulator directly.
+ so, code related vdd control should be added */
+/* Drive the T-flash VDD enable GPIO high (on_off != 0) or low.
+ * This function is compiled only under CONFIG_MIDAS_COMMON (see the
+ * enclosing #ifdef), so the original's nested
+ * #ifdef CONFIG_MIDAS_COMMON was always true and its #else arm
+ * (EXYNOS4212_GPJ0(7)) was unreachable dead code -- removed. */
+static void sdhci_s3c_vtf_on_off(int on_off)
+{
+ gpio_set_value(GPIO_TF_EN, on_off ? 1 : 0);
+}
+
+
+/* Return non-zero when the card-detect GPIO reports a card present,
+ * honouring the platform's ext_cd_gpio_invert (active-low) flag. */
+static int sdhci_s3c_get_card_exist(struct sdhci_host *host)
+{
+ struct sdhci_s3c *sc;
+ int status;
+
+ sc = sdhci_priv(host);
+
+ status = gpio_get_value(sc->ext_cd_gpio);
+ if (sc->pdata->ext_cd_gpio_invert)
+ status = !status;
+
+ return status;
+}
+#endif
+
static struct sdhci_ops sdhci_s3c_ops = {
.get_max_clock = sdhci_s3c_get_max_clk,
.set_clock = sdhci_s3c_set_clock,
.get_min_clock = sdhci_s3c_get_min_clock,
.platform_8bit_width = sdhci_s3c_platform_8bit_width,
+#ifdef CONFIG_MIDAS_COMMON
+ .set_power = sdhci_s3c_vtf_on_off,
+#endif
};
static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
@@ -327,12 +383,20 @@ static void sdhci_s3c_notify_change(struct platform_device *dev, int state)
spin_lock_irqsave(&host->lock, flags);
if (state) {
dev_dbg(&dev->dev, "card inserted.\n");
- host->flags &= ~SDHCI_DEVICE_DEAD;
+ pr_info("%s: card inserted.\n",
+ mmc_hostname(host->mmc));
host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+#ifdef CONFIG_MACH_MIDAS_01_BD
+ sdhci_s3c_vtf_on_off(1);
+#endif
} else {
dev_dbg(&dev->dev, "card removed.\n");
- host->flags |= SDHCI_DEVICE_DEAD;
+ pr_info("%s: card removed.\n",
+ mmc_hostname(host->mmc));
host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+#ifdef CONFIG_MACH_MIDAS_01_BD
+ sdhci_s3c_vtf_on_off(0);
+#endif
}
tasklet_schedule(&host->card_tasklet);
spin_unlock_irqrestore(&host->lock, flags);
@@ -345,6 +409,17 @@ static irqreturn_t sdhci_s3c_gpio_card_detect_thread(int irq, void *dev_id)
int status = gpio_get_value(sc->ext_cd_gpio);
if (sc->pdata->ext_cd_gpio_invert)
status = !status;
+
+ if (sc->host->mmc) {
+ if (status)
+ mmc_host_sd_set_present(sc->host->mmc);
+ else
+ mmc_host_sd_clear_present(sc->host->mmc);
+
+ pr_debug("SDcard present state=%d.\n",
+ mmc_host_sd_present(sc->host->mmc));
+ }
+
sdhci_s3c_notify_change(sc->pdev, status);
return IRQ_HANDLED;
}
@@ -354,8 +429,7 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc)
struct s3c_sdhci_platdata *pdata = sc->pdata;
struct device *dev = &sc->pdev->dev;
- if (gpio_request(pdata->ext_cd_gpio, "SDHCI EXT CD") == 0) {
- sc->ext_cd_gpio = pdata->ext_cd_gpio;
+ if (sc->ext_cd_gpio > 0) {
sc->ext_cd_irq = gpio_to_irq(pdata->ext_cd_gpio);
if (sc->ext_cd_irq &&
request_threaded_irq(sc->ext_cd_irq, NULL,
@@ -365,16 +439,56 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc)
int status = gpio_get_value(sc->ext_cd_gpio);
if (pdata->ext_cd_gpio_invert)
status = !status;
+
+ if (status)
+ mmc_host_sd_set_present(sc->host->mmc);
+ else
+ mmc_host_sd_clear_present(sc->host->mmc);
+
+ /* T-Flash EINT for CD SHOULD be wakeup source */
+ irq_set_irq_wake(sc->ext_cd_irq, 1);
+
sdhci_s3c_notify_change(sc->pdev, status);
} else {
dev_warn(dev, "cannot request irq for card detect\n");
sc->ext_cd_irq = 0;
}
+ }
+}
+
+extern struct class *sec_class;
+static struct device *sd_detection_cmd_dev;
+
+static ssize_t sd_detection_cmd_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sdhci_s3c *sc = dev_get_drvdata(dev);
+ unsigned int detect;
+
+ if (sc && sc->ext_cd_gpio)
+ detect = gpio_get_value(sc->ext_cd_gpio);
+ else {
+ pr_info("%s : External SD detect pin Error\n", __func__);
+ return sprintf(buf, "Error\n");
+ }
+
+ if (sc->pdata->ext_cd_gpio_invert) {
+ pr_info("%s : Invert External SD detect pin\n", __func__);
+ detect = !detect;
+ }
+
+ pr_info("%s : detect = %d.\n", __func__, detect);
+ if (detect) {
+ pr_debug("sdhci: card inserted.\n");
+ return sprintf(buf, "Insert\n");
} else {
- dev_err(dev, "cannot request gpio for card detect\n");
+ pr_debug("sdhci: card removed.\n");
+ return sprintf(buf, "Remove\n");
}
}
+static DEVICE_ATTR(status, 0444, sd_detection_cmd_show, NULL);
+
static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
{
struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data;
@@ -501,11 +615,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
* SDHCI block, or a missing configuration that needs to be set. */
host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ;
- /* This host supports the Auto CMD12 */
- host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
-
- if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
- pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
+ if (pdata->cd_type == S3C_SDHCI_CD_NONE)
host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
@@ -514,6 +624,10 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
if (pdata->host_caps)
host->mmc->caps |= pdata->host_caps;
+ /* if vmmc_name is in pdata */
+ if (pdata->vmmc_name)
+ host->vmmc_name = pdata->vmmc_name;
+
host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
SDHCI_QUIRK_32BIT_DMA_SIZE);
@@ -534,21 +648,88 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
if (pdata->host_caps)
host->mmc->caps |= pdata->host_caps;
+ /* for BCM WIFI */
+ if (pdata->pm_flags)
+ host->mmc->pm_flags |= pdata->pm_flags;
+
+#ifdef CONFIG_MACH_MIDAS_01_BD
+ /* before calling sdhci_add_host, you should turn vdd_tflash on */
+ sdhci_s3c_vtf_on_off(1);
+#endif
+
+ /* To turn on vmmc regulator only if sd card exists,
+ GPIO pin for card detection should be initialized.
+ Moved from sdhci_s3c_setup_card_detect_gpio() function */
+ if (pdata->cd_type == S3C_SDHCI_CD_GPIO &&
+ gpio_is_valid(pdata->ext_cd_gpio)) {
+ if (gpio_request(pdata->ext_cd_gpio, "SDHCI EXT CD") == 0) {
+ sc->ext_cd_gpio = pdata->ext_cd_gpio;
+ sc->ext_cd_gpio_invert = pdata->ext_cd_gpio_invert;
+
+ mmc_host_sd_set_present(host->mmc);
+ if (sd_detection_cmd_dev == NULL &&
+ sc->ext_cd_gpio) {
+ sd_detection_cmd_dev =
+ device_create(sec_class, NULL, 0,
+ NULL, "sdcard");
+ if (IS_ERR(sd_detection_cmd_dev))
+ pr_err("Fail to create sysfs dev\n");
+
+ if (device_create_file(sd_detection_cmd_dev,
+ &dev_attr_status) < 0)
+ pr_err("Fail to create sysfs file\n");
+
+ dev_set_drvdata(sd_detection_cmd_dev, sc);
+ }
+#ifdef CONFIG_MIDAS_COMMON
+ /* set TF_EN gpio as OUTPUT */
+ gpio_request(GPIO_TF_EN, "TF_EN");
+ gpio_direction_output(GPIO_TF_EN, 1);
+ s3c_gpio_cfgpin(GPIO_TF_EN, S3C_GPIO_SFN(1));
+ s3c_gpio_setpull(GPIO_TF_EN, S3C_GPIO_PULL_NONE);
+#endif
+ } else {
+ dev_err(dev, "cannot request gpio for card detect\n");
+ }
+ }
+
ret = sdhci_add_host(host);
if (ret) {
dev_err(dev, "sdhci_add_host() failed\n");
goto err_add_host;
}
+ /* If SDHCI_QUIRK_BROKEN_CARD_DETECTION is set before calling
+ sdhci_add_host, sdhci_add_host will set MMC_CAP_NEEDS_POLL.
+ The S3C_SDHCI_CD_PERMANENT case does not need to detect a
+ card by polling. */
+ if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT || \
+ pdata->cd_type == S3C_SDHCI_CD_GPIO)
+ host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
+
/* The following two methods of card detection might call
sdhci_s3c_notify_change() immediately, so they can be called
only after sdhci_add_host(). Setup errors are ignored. */
- if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_init)
+ if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_init) {
pdata->ext_cd_init(&sdhci_s3c_notify_change);
+#ifdef CONFIG_MACH_PX
+ if (pdata->ext_pdev)
+ pdata->ext_pdev(pdev);
+#endif
+ }
if (pdata->cd_type == S3C_SDHCI_CD_GPIO &&
gpio_is_valid(pdata->ext_cd_gpio))
sdhci_s3c_setup_card_detect_gpio(sc);
+#ifdef CONFIG_MACH_MIDAS_01_BD
+ /* if card does not exist, it should turn vtf off */
+ if (pdata->cd_type == S3C_SDHCI_CD_GPIO &&
+ sdhci_s3c_get_card_exist(host))
+ sdhci_s3c_vtf_on_off(1);
+ else
+ sdhci_s3c_vtf_on_off(0);
+#endif
+
return 0;
err_add_host:
@@ -613,17 +794,32 @@ static int __devexit sdhci_s3c_remove(struct platform_device *pdev)
static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm)
{
struct sdhci_host *host = platform_get_drvdata(dev);
+ int ret = 0;
- sdhci_suspend_host(host, pm);
- return 0;
+ ret = sdhci_suspend_host(host, pm);
+
+#ifdef CONFIG_MACH_MIDAS_01_BD
+ /* turn vdd_tflash off */
+ sdhci_s3c_vtf_on_off(0);
+#endif
+ return ret;
}
static int sdhci_s3c_resume(struct platform_device *dev)
{
struct sdhci_host *host = platform_get_drvdata(dev);
+ int ret = 0;
- sdhci_resume_host(host);
- return 0;
+#ifdef CONFIG_MACH_MIDAS_01_BD
+ /* turn vdd_tflash on if a card exists, off otherwise */
+ if (sdhci_s3c_get_card_exist(host))
+ sdhci_s3c_vtf_on_off(1);
+ else
+ sdhci_s3c_vtf_on_off(0);
+
+#endif
+ ret = sdhci_resume_host(host);
+ return ret;
}
#else
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 6d3de08..9f23876 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -25,13 +25,16 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
+#include <linux/mmc/card.h>
#include "sdhci.h"
+#include <linux/gpio.h>
+
#define DRIVER_NAME "sdhci"
#define DBG(f, x...) \
- pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
+ pr_debug(DRIVER_NAME " [%s()]: " f, __func__, ## x)
#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \
defined(CONFIG_MMC_SDHCI_MODULE))
@@ -46,9 +49,25 @@ static void sdhci_finish_data(struct sdhci_host *);
static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
static void sdhci_finish_command(struct sdhci_host *);
-static int sdhci_execute_tuning(struct mmc_host *mmc);
+static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_tuning_timer(unsigned long data);
+#define MAX_BUS_CLK (4)
+
+struct sdhci_s3c {
+ struct sdhci_host *host;
+ struct platform_device *pdev;
+ struct resource *ioarea;
+ struct s3c_sdhci_platdata *pdata;
+ unsigned int cur_clk;
+ int ext_cd_irq;
+ int ext_cd_gpio;
+ int ext_cd_gpio_invert;
+
+ struct clk *clk_io;
+ struct clk *clk_bus[MAX_BUS_CLK];
+};
+
static void sdhci_dumpregs(struct sdhci_host *host)
{
printk(KERN_DEBUG DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
@@ -651,6 +670,17 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
break;
}
+ /* card's type is SD, set timeout */
+ if (host->mmc->card && mmc_card_sd(host->mmc->card)) {
+ count += 2;
+ /*
+ * It's to prevent warning error log,
+ * If count value is more than 0xD before add 2.
+ */
+ if (count >= 0xF)
+ count = 0xE;
+ }
+
if (count >= 0xF) {
printk(KERN_WARNING "%s: Too large timeout requested for CMD%d!\n",
mmc_hostname(host->mmc), cmd->opcode);
@@ -1006,7 +1036,7 @@ static void sdhci_finish_command(struct sdhci_host *host)
if (host->cmd->flags & MMC_RSP_PRESENT) {
if (host->cmd->flags & MMC_RSP_136) {
/* CRC is stripped so we need to do some shifting. */
- for (i = 0;i < 4;i++) {
+ for (i = 0 ; i < 4 ; i++) {
host->cmd->resp[i] = sdhci_readl(host,
SDHCI_RESPONSE + (3-i)*4) << 8;
if (i != 3)
@@ -1044,7 +1074,7 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
u16 clk = 0;
unsigned long timeout;
- if (clock == host->clock)
+ if (clock && clock == host->clock)
return;
if (host->ops->set_clock) {
@@ -1250,13 +1280,12 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
if ((host->flags & SDHCI_NEEDS_RETUNING) &&
!(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
spin_unlock_irqrestore(&host->lock, flags);
- sdhci_execute_tuning(mmc);
+ sdhci_execute_tuning(mmc, mrq->cmd->opcode);
spin_lock_irqsave(&host->lock, flags);
/* Restore original mmc_request structure */
host->mrq = mrq;
}
-
if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
sdhci_send_command(host, mrq->sbc);
else
@@ -1275,10 +1304,41 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host = mmc_priv(mmc);
+ if ((!mmc_host_sd_present(mmc) ||
+ (mmc_host_sd_present(mmc) &&
+ !mmc_host_sd_init_stat(mmc) &&
+ mmc_host_sd_prev_stat(mmc))) &&
+ ios->power_mode == MMC_POWER_OFF) {
+ mmc_host_sd_clear_prev_stat(mmc);
+ if (host->vmmc && regulator_is_enabled(host->vmmc)) {
+#ifdef CONFIG_MIDAS_COMMON
+ if (host->ops->set_power)
+ host->ops->set_power(0);
+#endif
+ regulator_disable(host->vmmc);
+ pr_info("%s : MMC Card OFF %s\n", __func__,
+ host->hw_name);
+ }
+ } else if (mmc_host_sd_present(mmc) &&
+ !mmc_host_sd_prev_stat(mmc)) {
+ mmc_host_sd_set_prev_stat(mmc);
+ if (host->vmmc && !regulator_is_enabled(host->vmmc)) {
+#ifdef CONFIG_MIDAS_COMMON
+ if (host->ops->set_power)
+ host->ops->set_power(1);
+#endif
+ regulator_enable(host->vmmc);
+ pr_info("%s : MMC Card ON %s\n", __func__,
+ host->hw_name);
+ }
+ }
+
spin_lock_irqsave(&host->lock, flags);
- if (host->flags & SDHCI_DEVICE_DEAD)
+ if (host->flags & SDHCI_DEVICE_DEAD) {
+ sdhci_set_clock(host, 0);
goto out;
+ }
/*
* Reset the chip on each power off.
@@ -1416,7 +1476,7 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
* signalling timeout and CRC errors even on CMD0. Resetting
* it on each ios seems to solve the problem.
*/
- if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
+ if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
out:
@@ -1469,6 +1529,14 @@ static int sdhci_get_ro(struct mmc_host *mmc)
return 0;
}
+/* mmc_host_ops hook: delegate a hardware reset to the platform's
+ * optional ->hw_reset callback; no-op when none is provided. */
+static void sdhci_hw_reset(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+
+ if (host->ops && host->ops->hw_reset)
+ host->ops->hw_reset(host);
+}
+
static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
struct sdhci_host *host;
@@ -1593,7 +1661,7 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
return 0;
}
-static int sdhci_execute_tuning(struct mmc_host *mmc)
+static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host;
u16 ctrl;
@@ -1651,7 +1719,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc)
if (!tuning_loop_counter && !timeout)
break;
- cmd.opcode = MMC_SEND_TUNING_BLOCK;
+ cmd.opcode = opcode;
cmd.arg = 0;
cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
cmd.retries = 0;
@@ -1803,6 +1871,7 @@ static const struct mmc_host_ops sdhci_ops = {
.request = sdhci_request,
.set_ios = sdhci_set_ios,
.get_ro = sdhci_get_ro,
+ .hw_reset = sdhci_hw_reset,
.enable_sdio_irq = sdhci_enable_sdio_irq,
.start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
.execute_tuning = sdhci_execute_tuning,
@@ -1820,16 +1889,16 @@ static void sdhci_tasklet_card(unsigned long param)
struct sdhci_host *host;
unsigned long flags;
- host = (struct sdhci_host*)param;
+ host = (struct sdhci_host *)param;
spin_lock_irqsave(&host->lock, flags);
if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) {
if (host->mrq) {
printk(KERN_ERR "%s: Card removed during transfer!\n",
- mmc_hostname(host->mmc));
+ mmc_hostname(host->mmc));
printk(KERN_ERR "%s: Resetting controller.\n",
- mmc_hostname(host->mmc));
+ mmc_hostname(host->mmc));
sdhci_reset(host, SDHCI_RESET_CMD);
sdhci_reset(host, SDHCI_RESET_DATA);
@@ -1841,7 +1910,11 @@ static void sdhci_tasklet_card(unsigned long param)
spin_unlock_irqrestore(&host->lock, flags);
- mmc_detect_change(host->mmc, msecs_to_jiffies(200));
+ if (host->vmmc &&
+ !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION))
+ mmc_detect_change(host->mmc, msecs_to_jiffies(0));
+ else
+ mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}
static void sdhci_tasklet_finish(unsigned long param)
@@ -1850,12 +1923,12 @@ static void sdhci_tasklet_finish(unsigned long param)
unsigned long flags;
struct mmc_request *mrq;
- host = (struct sdhci_host*)param;
+ host = (struct sdhci_host *)param;
- /*
- * If this tasklet gets rescheduled while running, it will
- * be run again afterwards but without any active request.
- */
+ /*
+ * If this tasklet gets rescheduled while running, it will
+ * be run again afterwards but without any active request.
+ */
if (!host->mrq)
return;
@@ -1910,7 +1983,7 @@ static void sdhci_timeout_timer(unsigned long data)
struct sdhci_host *host;
unsigned long flags;
- host = (struct sdhci_host*)data;
+ host = (struct sdhci_host *)data;
spin_lock_irqsave(&host->lock, flags);
@@ -1968,11 +2041,20 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
return;
}
- if (intmask & SDHCI_INT_TIMEOUT)
+ if (intmask & SDHCI_INT_TIMEOUT) {
+ printk(KERN_INFO "%s: cmd %d command timeout error\n",
+ mmc_hostname(host->mmc), host->cmd->opcode);
host->cmd->error = -ETIMEDOUT;
- else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
- SDHCI_INT_INDEX))
+ } else if (intmask & (SDHCI_INT_CRC | SDHCI_INT_END_BIT |
+ SDHCI_INT_INDEX)) {
+ printk(KERN_ERR "%s: cmd %d %s error\n",
+ mmc_hostname(host->mmc), host->cmd->opcode,
+ (intmask & SDHCI_INT_CRC) ? "command crc" :
+ (intmask & SDHCI_INT_END_BIT) ? "command end bit" :
+ "command index error");
host->cmd->error = -EILSEQ;
+ }
+
if (host->cmd->error) {
tasklet_schedule(&host->finish_tasklet);
@@ -2069,15 +2151,17 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
return;
}
- if (intmask & SDHCI_INT_DATA_TIMEOUT)
- host->data->error = -ETIMEDOUT;
- else if (intmask & SDHCI_INT_DATA_END_BIT)
- host->data->error = -EILSEQ;
- else if ((intmask & SDHCI_INT_DATA_CRC) &&
- SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
- != MMC_BUS_TEST_R)
+ if (intmask & SDHCI_INT_DATA_TIMEOUT) {
+ printk(KERN_ERR "%s: cmd %d data timeout error\n",
+ mmc_hostname(host->mmc), host->mrq->cmd->opcode);
+ host->data->error = -ETIMEDOUT;
+ } else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) {
+ printk(KERN_ERR "%s: cmd %d %s error\n",
+ mmc_hostname(host->mmc), host->mrq->cmd->opcode,
+ (intmask & SDHCI_INT_DATA_CRC) ? "data crc" :
+ "command end bit");
host->data->error = -EILSEQ;
- else if (intmask & SDHCI_INT_ADMA_ERROR) {
+ } else if (intmask & SDHCI_INT_ADMA_ERROR) {
printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc));
sdhci_show_adma_error(host);
host->data->error = -EIO;
@@ -2134,7 +2218,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
irqreturn_t result;
- struct sdhci_host* host = dev_id;
+ struct sdhci_host *host = dev_id;
u32 intmask;
int cardint = 0;
@@ -2232,14 +2316,27 @@ int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state)
host->tuning_count * HZ);
}
+ if (host->mmc->pm_flags & MMC_PM_IGNORE_SUSPEND_RESUME) {
+ host->mmc->pm_flags |= MMC_PM_KEEP_POWER;
+ pr_info("%s : Enter WIFI suspend\n", __func__);
+ }
+
ret = mmc_suspend_host(host->mmc);
if (ret)
return ret;
free_irq(host->irq, host);
- if (host->vmmc)
- ret = regulator_disable(host->vmmc);
+ if (host->vmmc) {
+ if (regulator_is_enabled(host->vmmc)) {
+#ifdef CONFIG_MIDAS_COMMON
+ if (host->ops->set_power)
+ host->ops->set_power(0);
+#endif
+ ret = regulator_disable(host->vmmc);
+ pr_info("%s : MMC Card OFF\n", __func__);
+ }
+ }
return ret;
}
@@ -2250,10 +2347,15 @@ int sdhci_resume_host(struct sdhci_host *host)
{
int ret;
- if (host->vmmc) {
- int ret = regulator_enable(host->vmmc);
+ if (host->vmmc && !regulator_is_enabled(host->vmmc)) {
+#ifdef CONFIG_MIDAS_COMMON
+ if (host->ops->set_power)
+ host->ops->set_power(1);
+#endif
+ ret = regulator_enable(host->vmmc);
if (ret)
return ret;
+ pr_info("%s : MMC Card ON\n", __func__);
}
@@ -2278,6 +2380,13 @@ int sdhci_resume_host(struct sdhci_host *host)
(host->tuning_mode == SDHCI_TUNING_MODE_1))
host->flags |= SDHCI_NEEDS_RETUNING;
+#ifdef CONFIG_MACH_PX
+ /* host has a card and the card is SDIO type */
+ if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
+ /* enable sdio interrupt */
+ sdhci_enable_sdio_irq(host->mmc, 1);
+ }
+#endif
return ret;
}
@@ -2328,6 +2437,7 @@ int sdhci_add_host(struct sdhci_host *host)
u32 max_current_caps;
unsigned int ocr_avail;
int ret;
+ struct sdhci_s3c *sc;
WARN_ON(host == NULL);
if (host == NULL)
@@ -2485,7 +2595,12 @@ int sdhci_add_host(struct sdhci_host *host)
} else
mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
- mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
+ if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)
+ mmc->max_discard_to = (1 << 27) / (mmc->f_max / 1000);
+ else
+ mmc->max_discard_to = (1 << 27) / host->timeout_clk;
+
+ mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE;
if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
host->flags |= SDHCI_AUTO_CMD12;
@@ -2542,6 +2657,15 @@ int sdhci_add_host(struct sdhci_host *host)
if (caps[1] & SDHCI_DRIVER_TYPE_D)
mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
+ /*
+ * If Power Off Notify capability is enabled by the host,
+ * set notify to short power off notify timeout value.
+ */
+ if (mmc->caps2 & MMC_CAP2_POWEROFF_NOTIFY)
+ mmc->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT;
+ else
+ mmc->power_notify_type = MMC_HOST_PW_NOTIFY_NONE;
+
/* Initial value for re-tuning timer count */
host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
SDHCI_RETUNING_TIMER_COUNT_SHIFT;
@@ -2716,12 +2840,32 @@ int sdhci_add_host(struct sdhci_host *host)
if (ret)
goto untasklet;
- host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
+ sc = sdhci_priv(host);
+
+ if (host->vmmc_name)
+ host->vmmc = regulator_get(mmc_dev(mmc), host->vmmc_name);
+ else
+ host->vmmc = regulator_get(mmc_dev(mmc), "vmmc");
+
if (IS_ERR(host->vmmc)) {
- printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc));
+ printk(KERN_ERR "%s: no %s regulator found\n",
+ mmc_hostname(mmc),
+ host->vmmc_name ? host->vmmc_name : "vmmc");
host->vmmc = NULL;
} else {
- regulator_enable(host->vmmc);
+ printk(KERN_INFO "%s: %s regulator found\n",
+ mmc_hostname(mmc),
+ host->vmmc_name ? host->vmmc_name : "vmmc");
+ if (sc->ext_cd_gpio) {
+ if (gpio_get_value(sc->ext_cd_gpio) != (sc->ext_cd_gpio_invert)) {
+#ifdef CONFIG_MIDAS_COMMON
+ if (host->ops->set_power)
+ host->ops->set_power(1);
+#endif
+ regulator_enable(host->vmmc);
+ mdelay(100);
+ }
+ }
}
sdhci_init(host, 0);
@@ -2810,7 +2954,11 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
tasklet_kill(&host->card_tasklet);
tasklet_kill(&host->finish_tasklet);
- if (host->vmmc) {
+ if (host->vmmc && regulator_is_enabled(host->vmmc)) {
+#ifdef CONFIG_MIDAS_COMMON
+ if (host->ops->set_power)
+ host->ops->set_power(0);
+#endif
regulator_disable(host->vmmc);
regulator_put(host->vmmc);
}
@@ -2839,11 +2987,12 @@ EXPORT_SYMBOL_GPL(sdhci_free_host);
static int __init sdhci_drv_init(void)
{
+ int ret = 0;
printk(KERN_INFO DRIVER_NAME
": Secure Digital Host Controller Interface driver\n");
printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
- return 0;
+ return ret;
}
static void __exit sdhci_drv_exit(void)
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 745c42f..b04e361 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -273,7 +273,8 @@ struct sdhci_ops {
void (*platform_reset_enter)(struct sdhci_host *host, u8 mask);
void (*platform_reset_exit)(struct sdhci_host *host, u8 mask);
int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs);
-
+ void (*hw_reset)(struct sdhci_host *host);
+ void (*set_power)(int on_off);
};
#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS