-rw-r--r--  drivers/message/i2o/bus-osm.c    |  21
-rw-r--r--  drivers/message/i2o/device.c     |  51
-rw-r--r--  drivers/message/i2o/exec-osm.c   |  93
-rw-r--r--  drivers/message/i2o/i2o_block.c  | 157
-rw-r--r--  drivers/message/i2o/i2o_config.c | 169
-rw-r--r--  drivers/message/i2o/i2o_scsi.c   |  50
-rw-r--r--  drivers/message/i2o/iop.c        | 296
-rw-r--r--  drivers/message/i2o/pci.c        |   1
-rw-r--r--  include/linux/i2o.h              | 975
9 files changed, 964 insertions, 849 deletions
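
The common thread through the hunks below: I2O message frames are no longer built in controller MMIO space with writel()/memcpy_toio() against a frame handle "m"; i2o_msg_get()/i2o_msg_get_wait() now return a struct i2o_message * in ordinary kernel memory (or an ERR_PTR-encoded errno), the caller fills the frame with cpu_to_le32() stores, and the message pointer itself is passed to i2o_msg_post()/i2o_msg_post_wait(). A minimal sketch of the new calling convention, distilled from this patch (the example_* function is illustrative and not part of the patch; all calls and macros used are the ones the hunks below introduce or rely on):

/* Sketch: send a simple five-word request under the reworked message API. */
static int example_send_simple_cmd(struct i2o_device *dev, u32 cmd)
{
	struct i2o_message *msg;

	/* Frames now come from a kernel-memory pool, not from MMIO. */
	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);	/* typically -ETIMEDOUT */

	/* Plain stores with explicit endian conversion replace writel(). */
	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] = cpu_to_le32(cmd << 24 | HOST_TID << 12 |
				     dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(0x00000000);

	/* The message pointer, not a frame offset, is handed to the core. */
	return i2o_msg_post_wait(dev->iop, msg, 60);
}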
diff --git a/drivers/message/i2o/bus-osm.c b/drivers/message/i2o/bus-osm.c
index 151b228..ce039d3 100644
--- a/drivers/message/i2o/bus-osm.c
+++ b/drivers/message/i2o/bus-osm.c
@@ -39,18 +39,18 @@ static struct i2o_class_id i2o_bus_class_id[] = {
*/
static int i2o_bus_scan(struct i2o_device *dev)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
- m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
return -ETIMEDOUT;
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.tid,
- &msg->u.head[1]);
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BUS_SCAN << 24 | HOST_TID << 12 | dev->lct_data.
+ tid);
- return i2o_msg_post_wait(dev->iop, m, 60);
+ return i2o_msg_post_wait(dev->iop, msg, 60);
};
/**
@@ -59,8 +59,9 @@ static int i2o_bus_scan(struct i2o_device *dev)
*
* Returns count.
*/
-static ssize_t i2o_bus_store_scan(struct device *d, struct device_attribute *attr, const char *buf,
- size_t count)
+static ssize_t i2o_bus_store_scan(struct device *d,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct i2o_device *i2o_dev = to_i2o_device(d);
int rc;
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
index 8eb50cd..002ae0e 100644
--- a/drivers/message/i2o/device.c
+++ b/drivers/message/i2o/device.c
@@ -35,18 +35,18 @@
static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd,
u32 type)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
- m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid, &msg->u.head[1]);
- writel(type, &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid);
+ msg->body[0] = cpu_to_le32(type);
- return i2o_msg_post_wait(dev->iop, m, 60);
+ return i2o_msg_post_wait(dev->iop, msg, 60);
}
/**
@@ -419,10 +419,9 @@ int i2o_device_parse_lct(struct i2o_controller *c)
* ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
*/
int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
- int oplen, void *reslist, int reslen)
+ int oplen, void *reslist, int reslen)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
u32 *res32 = (u32 *) reslist;
u32 *restmp = (u32 *) reslist;
int len = 0;
@@ -437,26 +436,28 @@ int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL))
return -ENOMEM;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY) {
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg)) {
i2o_dma_free(dev, &res);
- return -ETIMEDOUT;
+ return PTR_ERR(msg);
}
i = 0;
- writel(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid,
- &msg->u.head[1]);
- writel(0, &msg->body[i++]);
- writel(0x4C000000 | oplen, &msg->body[i++]); /* OperationList */
- memcpy_toio(&msg->body[i], oplist, oplen);
+ msg->u.head[1] =
+ cpu_to_le32(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid);
+ msg->body[i++] = cpu_to_le32(0x00000000);
+ msg->body[i++] = cpu_to_le32(0x4C000000 | oplen); /* OperationList */
+ memcpy(&msg->body[i], oplist, oplen);
+
i += (oplen / 4 + (oplen % 4 ? 1 : 0));
- writel(0xD0000000 | res.len, &msg->body[i++]); /* ResultList */
- writel(res.phys, &msg->body[i++]);
+ msg->body[i++] = cpu_to_le32(0xD0000000 | res.len); /* ResultList */
+ msg->body[i++] = cpu_to_le32(res.phys);
- writel(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) |
- SGL_OFFSET_5, &msg->u.head[0]);
+ msg->u.head[0] =
+ cpu_to_le32(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) |
+ SGL_OFFSET_5);
- rc = i2o_msg_post_wait_mem(c, m, 10, &res);
+ rc = i2o_msg_post_wait_mem(c, msg, 10, &res);
/* This only looks like a memory leak - don't "fix" it. */
if (rc == -ETIMEDOUT)
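
i2o_parm_issue() above also shows the buffer-ownership rule around i2o_msg_post_wait_mem(): the DMA result buffer is handed to the core along with the message, and if the wait times out the reply may still arrive later, so the caller must not free the buffer on -ETIMEDOUT (the core releases it once the late reply is finally processed). A condensed sketch of that caller pattern, with the example_* name being illustrative only:

/* Sketch: post a message that references a DMA buffer and respect the
 * timeout ownership rule from i2o_parm_issue() above.
 */
static int example_post_with_buffer(struct i2o_controller *c,
				    struct i2o_message *msg,
				    struct i2o_dma *res)
{
	int rc;

	rc = i2o_msg_post_wait_mem(c, msg, 10, res);

	/* This only looks like a memory leak - don't "fix" it. */
	if (rc != -ETIMEDOUT)
		i2o_dma_free(&c->pdev->dev, res);

	return rc;
}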
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index 9c339a2..71a0933 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -114,13 +114,12 @@ static void i2o_exec_wait_free(struct i2o_exec_wait *wait)
* Returns 0 on success, negative error code on timeout or positive error
* code from reply.
*/
-int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long
- timeout, struct i2o_dma *dma)
+int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
+ unsigned long timeout, struct i2o_dma *dma)
{
DECLARE_WAIT_QUEUE_HEAD(wq);
struct i2o_exec_wait *wait;
static u32 tcntxt = 0x80000000;
- struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m);
int rc = 0;
wait = i2o_exec_wait_alloc();
@@ -138,15 +137,15 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long
* We will only use transaction contexts >= 0x80000000 for POST WAIT,
* so we could find a POST WAIT reply easier in the reply handler.
*/
- writel(i2o_exec_driver.context, &msg->u.s.icntxt);
+ msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
wait->tcntxt = tcntxt++;
- writel(wait->tcntxt, &msg->u.s.tcntxt);
+ msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);
/*
* Post the message to the controller. At some point later it will
* return. If we time out before it returns then complete will be zero.
*/
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
if (!wait->complete) {
wait->wq = &wq;
@@ -266,7 +265,8 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
*
* Returns number of bytes printed into buffer.
*/
-static ssize_t i2o_exec_show_vendor_id(struct device *d, struct device_attribute *attr, char *buf)
+static ssize_t i2o_exec_show_vendor_id(struct device *d,
+ struct device_attribute *attr, char *buf)
{
struct i2o_device *dev = to_i2o_device(d);
u16 id;
@@ -286,7 +286,9 @@ static ssize_t i2o_exec_show_vendor_id(struct device *d, struct device_attribute
*
* Returns number of bytes printed into buffer.
*/
-static ssize_t i2o_exec_show_product_id(struct device *d, struct device_attribute *attr, char *buf)
+static ssize_t i2o_exec_show_product_id(struct device *d,
+ struct device_attribute *attr,
+ char *buf)
{
struct i2o_device *dev = to_i2o_device(d);
u16 id;
@@ -385,23 +387,22 @@ static int i2o_exec_reply(struct i2o_controller *c, u32 m,
u32 context;
if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) {
+ struct i2o_message __iomem *pmsg;
+ u32 pm;
+
/*
* If Fail bit is set we must take the transaction context of
* the preserved message to find the right request again.
*/
- struct i2o_message __iomem *pmsg;
- u32 pm;
pm = le32_to_cpu(msg->body[3]);
-
pmsg = i2o_msg_in_to_virt(c, pm);
+ context = readl(&pmsg->u.s.tcntxt);
i2o_report_status(KERN_INFO, "i2o_core", msg);
- context = readl(&pmsg->u.s.tcntxt);
-
/* Release the preserved msg */
- i2o_msg_nop(c, pm);
+ i2o_msg_nop_mfa(c, pm);
} else
context = le32_to_cpu(msg->u.s.tcntxt);
@@ -462,25 +463,26 @@ static void i2o_exec_event(struct i2o_event *evt)
*/
int i2o_exec_lct_get(struct i2o_controller *c)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
int i = 0;
int rc = -EAGAIN;
for (i = 1; i <= I2O_LCT_GET_TRIES; i++) {
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
-
- writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]);
- writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(0xffffffff, &msg->body[0]);
- writel(0x00000000, &msg->body[1]);
- writel(0xd0000000 | c->dlct.len, &msg->body[2]);
- writel(c->dlct.phys, &msg->body[3]);
-
- rc = i2o_msg_post_wait(c, m, I2O_TIMEOUT_LCT_GET);
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] =
+ cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->body[0] = cpu_to_le32(0xffffffff);
+ msg->body[1] = cpu_to_le32(0x00000000);
+ msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
+ msg->body[3] = cpu_to_le32(c->dlct.phys);
+
+ rc = i2o_msg_post_wait(c, msg, I2O_TIMEOUT_LCT_GET);
if (rc < 0)
break;
@@ -506,29 +508,28 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
{
i2o_status_block *sb = c->status_block.virt;
struct device *dev;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
dev = &c->pdev->dev;
if (i2o_dma_realloc(dev, &c->dlct, sb->expected_lct_size, GFP_KERNEL))
return -ENOMEM;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
-
- writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]);
- writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(i2o_exec_driver.context, &msg->u.s.icntxt);
- writel(0, &msg->u.s.tcntxt); /* FIXME */
- writel(0xffffffff, &msg->body[0]);
- writel(change_ind, &msg->body[1]);
- writel(0xd0000000 | c->dlct.len, &msg->body[2]);
- writel(c->dlct.phys, &msg->body[3]);
-
- i2o_msg_post(c, m);
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
+ msg->u.head[1] = cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(0x00000000);
+ msg->body[0] = cpu_to_le32(0xffffffff);
+ msg->body[1] = cpu_to_le32(change_ind);
+ msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
+ msg->body[3] = cpu_to_le32(c->dlct.phys);
+
+ i2o_msg_post(c, msg);
return 0;
};
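
i2o_exec_lct_notify() above is the asynchronous half of the convention: no wait, just fill in the two context words so the eventual reply can be routed. The initiator context (icntxt) selects which driver's reply handler receives the answer; the transaction context (tcntxt) lets that handler match the reply to the original request. A sketch of that pattern (the example_* function and its cmd/tid/tcntxt parameters are illustrative):

/* Sketch: fire-and-forget post; the reply arrives later via the posting
 * driver's reply handler.
 */
static int example_post_async(struct i2o_controller *c, struct i2o_driver *drv,
			      u32 cmd, u32 tid, u32 tcntxt)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] = cpu_to_le32(cmd << 24 | HOST_TID << 12 | tid);
	msg->u.s.icntxt = cpu_to_le32(drv->context);	/* who gets the reply */
	msg->u.s.tcntxt = cpu_to_le32(tcntxt);		/* which request it was */

	i2o_msg_post(c, msg);	/* returns immediately, no wait */
	return 0;
}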
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
index f283b5b..2bd15c7 100644
--- a/drivers/message/i2o/i2o_block.c
+++ b/drivers/message/i2o/i2o_block.c
@@ -130,20 +130,20 @@ static int i2o_block_remove(struct device *dev)
*/
static int i2o_block_device_flush(struct i2o_device *dev)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
- m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->lct_data.tid,
- &msg->u.head[1]);
- writel(60 << 16, &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->
+ lct_data.tid);
+ msg->body[0] = cpu_to_le32(60 << 16);
osm_debug("Flushing...\n");
- return i2o_msg_post_wait(dev->iop, m, 60);
+ return i2o_msg_post_wait(dev->iop, msg, 60);
};
/**
@@ -181,21 +181,21 @@ static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk,
*/
static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
{
- struct i2o_message __iomem *msg;
- u32 m;
-
- m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
-
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->lct_data.tid,
- &msg->u.head[1]);
- writel(-1, &msg->body[0]);
- writel(0, &msg->body[1]);
+ struct i2o_message *msg;
+
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->
+ lct_data.tid);
+ msg->body[0] = cpu_to_le32(-1);
+ msg->body[1] = cpu_to_le32(0x00000000);
osm_debug("Mounting...\n");
- return i2o_msg_post_wait(dev->iop, m, 2);
+ return i2o_msg_post_wait(dev->iop, msg, 2);
};
/**
@@ -210,20 +210,20 @@ static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
*/
static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
- m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg) == I2O_QUEUE_EMPTY)
+ return PTR_ERR(msg);
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
- &msg->u.head[1]);
- writel(-1, &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->
+ lct_data.tid);
+ msg->body[0] = cpu_to_le32(-1);
osm_debug("Locking...\n");
- return i2o_msg_post_wait(dev->iop, m, 2);
+ return i2o_msg_post_wait(dev->iop, msg, 2);
};
/**
@@ -238,20 +238,20 @@ static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
*/
static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
- m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
- &msg->u.head[1]);
- writel(media_id, &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->
+ lct_data.tid);
+ msg->body[0] = cpu_to_le32(media_id);
osm_debug("Unlocking...\n");
- return i2o_msg_post_wait(dev->iop, m, 2);
+ return i2o_msg_post_wait(dev->iop, msg, 2);
};
/**
@@ -267,21 +267,21 @@ static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
{
struct i2o_device *i2o_dev = dev->i2o_dev;
struct i2o_controller *c = i2o_dev->iop;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
int rc;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->lct_data.
- tid, &msg->u.head[1]);
- writel(op << 24, &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->
+ lct_data.tid);
+ msg->body[0] = cpu_to_le32(op << 24);
osm_debug("Power...\n");
- rc = i2o_msg_post_wait(c, m, 60);
+ rc = i2o_msg_post_wait(c, msg, 60);
if (!rc)
dev->power = op;
@@ -331,7 +331,7 @@ static inline void i2o_block_request_free(struct i2o_block_request *ireq)
*/
static inline int i2o_block_sglist_alloc(struct i2o_controller *c,
struct i2o_block_request *ireq,
- u32 __iomem ** mptr)
+ u32 ** mptr)
{
int nents;
enum dma_data_direction direction;
@@ -745,10 +745,9 @@ static int i2o_block_transfer(struct request *req)
struct i2o_block_device *dev = req->rq_disk->private_data;
struct i2o_controller *c;
int tid = dev->i2o_dev->lct_data.tid;
- struct i2o_message __iomem *msg;
- u32 __iomem *mptr;
+ struct i2o_message *msg;
+ u32 *mptr;
struct i2o_block_request *ireq = req->special;
- u32 m;
u32 tcntxt;
u32 sgl_offset = SGL_OFFSET_8;
u32 ctl_flags = 0x00000000;
@@ -763,9 +762,9 @@ static int i2o_block_transfer(struct request *req)
c = dev->i2o_dev->iop;
- m = i2o_msg_get(c, &msg);
- if (m == I2O_QUEUE_EMPTY) {
- rc = -EBUSY;
+ msg = i2o_msg_get(c);
+ if (IS_ERR(msg)) {
+ rc = PTR_ERR(msg);
goto exit;
}
@@ -775,8 +774,8 @@ static int i2o_block_transfer(struct request *req)
goto nop_msg;
}
- writel(i2o_block_driver.context, &msg->u.s.icntxt);
- writel(tcntxt, &msg->u.s.tcntxt);
+ msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(tcntxt);
mptr = &msg->body[0];
@@ -834,11 +833,11 @@ static int i2o_block_transfer(struct request *req)
sgl_offset = SGL_OFFSET_12;
- writel(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid,
- &msg->u.head[1]);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid);
- writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++);
- writel(tid, mptr++);
+ *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
+ *mptr++ = cpu_to_le32(tid);
/*
* ENABLE_DISCONNECT
@@ -853,22 +852,24 @@ static int i2o_block_transfer(struct request *req)
scsi_flags = 0xa0a0000a;
}
- writel(scsi_flags, mptr++);
+ *mptr++ = cpu_to_le32(scsi_flags);
*((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
*((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);
- memcpy_toio(mptr, cmd, 10);
+ memcpy(mptr, cmd, 10);
mptr += 4;
- writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++);
+ *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
} else
#endif
{
- writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]);
- writel(ctl_flags, mptr++);
- writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++);
- writel((u32) (req->sector << KERNEL_SECTOR_SHIFT), mptr++);
- writel(req->sector >> (32 - KERNEL_SECTOR_SHIFT), mptr++);
+ msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
+ *mptr++ = cpu_to_le32(ctl_flags);
+ *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
+ *mptr++ =
+ cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT));
+ *mptr++ =
+ cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT));
}
if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
@@ -876,13 +877,13 @@ static int i2o_block_transfer(struct request *req)
goto context_remove;
}
- writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) |
- sgl_offset, &msg->u.head[0]);
+ msg->u.head[0] =
+ cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);
list_add_tail(&ireq->queue, &dev->open_queue);
dev->open_queue_depth++;
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
return 0;
@@ -890,7 +891,7 @@ static int i2o_block_transfer(struct request *req)
i2o_cntxt_list_remove(c, req);
nop_msg:
- i2o_msg_nop(c, m);
+ i2o_msg_nop(c, msg);
exit:
return rc;
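
i2o_block_transfer() above illustrates how variable-length messages are now assembled: mptr is a plain u32 * that walks through msg->body[], payload words and scatter-gather elements are appended with ordinary stores (and memcpy() instead of memcpy_toio()), and the header's size field is computed at the end from how far mptr advanced. A simplified sketch of that shape (the payload layout and the example_* name are illustrative, not an exact I2O block request):

/* Sketch: build a variable-length message body through a moving pointer. */
static void example_build_variable_msg(struct i2o_message *msg, u32 cmd_tid,
				       u32 ctl_flags, u32 nr_bytes, u64 offset)
{
	u32 *mptr = &msg->body[0];

	msg->u.head[1] = cpu_to_le32(cmd_tid);

	*mptr++ = cpu_to_le32(ctl_flags);
	*mptr++ = cpu_to_le32(nr_bytes);
	*mptr++ = cpu_to_le32((u32) offset);		/* low 32 bits */
	*mptr++ = cpu_to_le32((u32) (offset >> 32));	/* high 32 bits */

	/* ... SG elements would be appended here, advancing mptr further ... */

	msg->u.head[0] =
	    cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | SGL_OFFSET_8);
}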
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index 3c3a7ab..4fe73d6 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -230,8 +230,7 @@ static int i2o_cfg_swdl(unsigned long arg)
struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
unsigned char maxfrag = 0, curfrag = 1;
struct i2o_dma buffer;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
unsigned int status = 0, swlen = 0, fragsize = 8192;
struct i2o_controller *c;
@@ -257,31 +256,34 @@ static int i2o_cfg_swdl(unsigned long arg)
if (!c)
return -ENXIO;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -EBUSY;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
- i2o_msg_nop(c, m);
+ i2o_msg_nop(c, msg);
return -ENOMEM;
}
__copy_from_user(buffer.virt, kxfer.buf, fragsize);
- writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]);
- writel(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(i2o_config_driver.context, &msg->u.head[2]);
- writel(0, &msg->u.head[3]);
- writel((((u32) kxfer.flags) << 24) | (((u32) kxfer.sw_type) << 16) |
- (((u32) maxfrag) << 8) | (((u32) curfrag)), &msg->body[0]);
- writel(swlen, &msg->body[1]);
- writel(kxfer.sw_id, &msg->body[2]);
- writel(0xD0000000 | fragsize, &msg->body[3]);
- writel(buffer.phys, &msg->body[4]);
+ msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+ msg->u.head[3] = cpu_to_le32(0);
+ msg->body[0] =
+ cpu_to_le32((((u32) kxfer.flags) << 24) | (((u32) kxfer.
+ sw_type) << 16) |
+ (((u32) maxfrag) << 8) | (((u32) curfrag)));
+ msg->body[1] = cpu_to_le32(swlen);
+ msg->body[2] = cpu_to_le32(kxfer.sw_id);
+ msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
+ msg->body[4] = cpu_to_le32(buffer.phys);
osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
- status = i2o_msg_post_wait_mem(c, m, 60, &buffer);
+ status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
if (status != -ETIMEDOUT)
i2o_dma_free(&c->pdev->dev, &buffer);
@@ -302,8 +304,7 @@ static int i2o_cfg_swul(unsigned long arg)
struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
unsigned char maxfrag = 0, curfrag = 1;
struct i2o_dma buffer;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
unsigned int status = 0, swlen = 0, fragsize = 8192;
struct i2o_controller *c;
int ret = 0;
@@ -330,30 +331,30 @@ static int i2o_cfg_swul(unsigned long arg)
if (!c)
return -ENXIO;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -EBUSY;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
- i2o_msg_nop(c, m);
+ i2o_msg_nop(c, msg);
return -ENOMEM;
}
- writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]);
- writel(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(i2o_config_driver.context, &msg->u.head[2]);
- writel(0, &msg->u.head[3]);
- writel((u32) kxfer.flags << 24 | (u32) kxfer.
- sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag,
- &msg->body[0]);
- writel(swlen, &msg->body[1]);
- writel(kxfer.sw_id, &msg->body[2]);
- writel(0xD0000000 | fragsize, &msg->body[3]);
- writel(buffer.phys, &msg->body[4]);
+ msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_7);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID);
+ msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+ msg->u.head[3] = cpu_to_le32(0);
+ msg->body[0] =
+ cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.
+ sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag);
+ msg->body[1] = cpu_to_le32(swlen);
+ msg->body[2] = cpu_to_le32(kxfer.sw_id);
+ msg->body[3] = cpu_to_le32(0xD0000000 | fragsize);
+ msg->body[4] = cpu_to_le32(buffer.phys);
osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
- status = i2o_msg_post_wait_mem(c, m, 60, &buffer);
+ status = i2o_msg_post_wait_mem(c, msg, 60, &buffer);
if (status != I2O_POST_WAIT_OK) {
if (status != -ETIMEDOUT)
@@ -380,8 +381,7 @@ static int i2o_cfg_swdel(unsigned long arg)
struct i2o_controller *c;
struct i2o_sw_xfer kxfer;
struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
unsigned int swlen;
int token;
@@ -395,21 +395,21 @@ static int i2o_cfg_swdel(unsigned long arg)
if (!c)
return -ENXIO;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -EBUSY;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(i2o_config_driver.context, &msg->u.head[2]);
- writel(0, &msg->u.head[3]);
- writel((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16,
- &msg->body[0]);
- writel(swlen, &msg->body[1]);
- writel(kxfer.sw_id, &msg->body[2]);
+ msg->u.head[0] = cpu_to_le32(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID);
+ msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+ msg->u.head[3] = cpu_to_le32(0);
+ msg->body[0] =
+ cpu_to_le32((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16);
+ msg->body[1] = cpu_to_le32(swlen);
+ msg->body[2] = cpu_to_le32(kxfer.sw_id);
- token = i2o_msg_post_wait(c, m, 10);
+ token = i2o_msg_post_wait(c, msg, 10);
if (token != I2O_POST_WAIT_OK) {
osm_info("swdel failed, DetailedStatus = %d\n", token);
@@ -423,25 +423,24 @@ static int i2o_cfg_validate(unsigned long arg)
{
int token;
int iop = (int)arg;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
struct i2o_controller *c;
c = i2o_find_iop(iop);
if (!c)
return -ENXIO;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -EBUSY;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop,
- &msg->u.head[1]);
- writel(i2o_config_driver.context, &msg->u.head[2]);
- writel(0, &msg->u.head[3]);
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop);
+ msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+ msg->u.head[3] = cpu_to_le32(0);
- token = i2o_msg_post_wait(c, m, 10);
+ token = i2o_msg_post_wait(c, msg, 10);
if (token != I2O_POST_WAIT_OK) {
osm_info("Can't validate configuration, ErrorStatus = %d\n",
@@ -454,8 +453,7 @@ static int i2o_cfg_validate(unsigned long arg)
static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg;
struct i2o_evt_id kdesc;
struct i2o_controller *c;
@@ -474,18 +472,19 @@ static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
if (!d)
return -ENODEV;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -EBUSY;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | kdesc.tid,
- &msg->u.head[1]);
- writel(i2o_config_driver.context, &msg->u.head[2]);
- writel(i2o_cntxt_list_add(c, fp->private_data), &msg->u.head[3]);
- writel(kdesc.evt_mask, &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 |
+ kdesc.tid);
+ msg->u.head[2] = cpu_to_le32(i2o_config_driver.context);
+ msg->u.head[3] = cpu_to_le32(i2o_cntxt_list_add(c, fp->private_data));
+ msg->body[0] = cpu_to_le32(kdesc.evt_mask);
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
return 0;
}
@@ -537,7 +536,6 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
u32 sg_index = 0;
i2o_status_block *sb;
struct i2o_message *msg;
- u32 m;
unsigned int iop;
cmd = (struct i2o_cmd_passthru32 __user *)arg;
@@ -553,7 +551,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
return -ENXIO;
}
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
sb = c->status_block.virt;
@@ -595,8 +593,8 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
sg_offset = (msg->u.head[0] >> 4) & 0x0f;
- writel(i2o_config_driver.context, &msg->u.s.icntxt);
- writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);
+ msg->u.s.icntxt = cpu_to_le32(i2o_config_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, reply));
memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
if (sg_offset) {
@@ -662,7 +660,7 @@ static int i2o_cfg_passthru32(struct file *file, unsigned cmnd,
}
}
- rcode = i2o_msg_post_wait(c, m, 60);
+ rcode = i2o_msg_post_wait(c, msg, 60);
if (rcode)
goto sg_list_cleanup;
@@ -780,8 +778,7 @@ static int i2o_cfg_passthru(unsigned long arg)
u32 i = 0;
void *p = NULL;
i2o_status_block *sb;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
unsigned int iop;
if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
@@ -793,7 +790,7 @@ static int i2o_cfg_passthru(unsigned long arg)
return -ENXIO;
}
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
sb = c->status_block.virt;
@@ -830,8 +827,8 @@ static int i2o_cfg_passthru(unsigned long arg)
sg_offset = (msg->u.head[0] >> 4) & 0x0f;
- writel(i2o_config_driver.context, &msg->u.s.icntxt);
- writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);
+ msg->u.s.icntxt = cpu_to_le32(i2o_config_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, reply));
memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
if (sg_offset) {
@@ -894,7 +891,7 @@ static int i2o_cfg_passthru(unsigned long arg)
}
}
- rcode = i2o_msg_post_wait(c, m, 60);
+ rcode = i2o_msg_post_wait(c, msg, 60);
if (rcode)
goto sg_list_cleanup;
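
The config-OSM hunks above also show the discipline for frames that are acquired but never posted: they must be handed back with i2o_msg_nop(), otherwise the frame is lost to the pool. A sketch of that error path, following i2o_cfg_swdl() (the example_* name is illustrative; the calls themselves appear in the hunks above):

/* Sketch: return an unposted frame on an error path. */
static int example_abort_unposted_msg(struct i2o_controller *c, size_t len)
{
	struct i2o_dma buffer;
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	if (i2o_dma_alloc(&c->pdev->dev, &buffer, len, GFP_KERNEL)) {
		i2o_msg_nop(c, msg);	/* give the frame back */
		return -ENOMEM;
	}

	/* ... on the success path the frame would be filled and posted ... */
	i2o_dma_free(&c->pdev->dev, &buffer);
	i2o_msg_nop(c, msg);	/* sketch only: nothing is posted here */
	return 0;
}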
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
index 9f1744c..7a784fd 100644
--- a/drivers/message/i2o/i2o_scsi.c
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -510,8 +510,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
struct i2o_controller *c;
struct i2o_device *i2o_dev;
int tid;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
/*
* ENABLE_DISCONNECT
* SIMPLE_TAG
@@ -519,7 +518,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
*/
u32 scsi_flags = 0x20a00000;
u32 sgl_offset;
- u32 __iomem *mptr;
+ u32 *mptr;
u32 cmd = I2O_CMD_SCSI_EXEC << 24;
int rc = 0;
@@ -576,8 +575,8 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
* throw it back to the scsi layer
*/
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY) {
+ msg = i2o_msg_get(c);
+ if (IS_ERR(msg)) {
rc = SCSI_MLQUEUE_HOST_BUSY;
goto exit;
}
@@ -617,16 +616,16 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
if (sgl_offset == SGL_OFFSET_10)
sgl_offset = SGL_OFFSET_12;
cmd = I2O_CMD_PRIVATE << 24;
- writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++);
- writel(adpt_flags | tid, mptr++);
+ *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
+ *mptr++ = cpu_to_le32(adpt_flags | tid);
}
#endif
- writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]);
- writel(i2o_scsi_driver.context, &msg->u.s.icntxt);
+ msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
+ msg->u.s.icntxt = cpu_to_le32(i2o_scsi_driver.context);
/* We want the SCSI control block back */
- writel(i2o_cntxt_list_add(c, SCpnt), &msg->u.s.tcntxt);
+ msg->u.s.tcntxt = cpu_to_le32(i2o_cntxt_list_add(c, SCpnt));
/* LSI_920_PCI_QUIRK
*
@@ -649,15 +648,15 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
}
*/
- writel(scsi_flags | SCpnt->cmd_len, mptr++);
+ *mptr++ = cpu_to_le32(scsi_flags | SCpnt->cmd_len);
/* Write SCSI command into the message - always 16 byte block */
- memcpy_toio(mptr, SCpnt->cmnd, 16);
+ memcpy(mptr, SCpnt->cmnd, 16);
mptr += 4;
if (sgl_offset != SGL_OFFSET_0) {
/* write size of data addressed by SGL */
- writel(SCpnt->request_bufflen, mptr++);
+ *mptr++ = cpu_to_le32(SCpnt->request_bufflen);
/* Now fill in the SGList and command */
if (SCpnt->use_sg) {
@@ -676,11 +675,11 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
}
/* Stick the headers on */
- writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset,
- &msg->u.head[0]);
+ msg->u.head[0] =
+ cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);
/* Queue the message */
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
osm_debug("Issued %ld\n", SCpnt->serial_number);
@@ -688,7 +687,7 @@ static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
nomem:
rc = -ENOMEM;
- i2o_msg_nop(c, m);
+ i2o_msg_nop(c, msg);
exit:
return rc;
@@ -709,8 +708,7 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
{
struct i2o_device *i2o_dev;
struct i2o_controller *c;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
int tid;
int status = FAILED;
@@ -720,16 +718,16 @@ static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
c = i2o_dev->iop;
tid = i2o_dev->lct_data.tid;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
return SCSI_MLQUEUE_HOST_BUSY;
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid,
- &msg->u.head[1]);
- writel(i2o_cntxt_list_get_ptr(c, SCpnt), &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid);
+ msg->body[0] = cpu_to_le32(i2o_cntxt_list_get_ptr(c, SCpnt));
- if (i2o_msg_post_wait(c, m, I2O_TIMEOUT_SCSI_SCB_ABORT))
+ if (i2o_msg_post_wait(c, msg, I2O_TIMEOUT_SCSI_SCB_ABORT))
status = SUCCESS;
return status;
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index 4eb5325..f86abb4 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -47,27 +47,6 @@ static struct i2o_dma i2o_systab;
static int i2o_hrt_get(struct i2o_controller *c);
/**
- * i2o_msg_nop - Returns a message which is not used
- * @c: I2O controller from which the message was created
- * @m: message which should be returned
- *
- * If you fetch a message via i2o_msg_get, and can't use it, you must
- * return the message with this function. Otherwise the message frame
- * is lost.
- */
-void i2o_msg_nop(struct i2o_controller *c, u32 m)
-{
- struct i2o_message __iomem *msg = i2o_msg_in_to_virt(c, m);
-
- writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(0, &msg->u.head[2]);
- writel(0, &msg->u.head[3]);
- i2o_msg_post(c, m);
-};
-
-/**
* i2o_msg_get_wait - obtain an I2O message from the IOP
* @c: I2O controller
* @msg: pointer to a I2O message pointer
@@ -81,22 +60,21 @@ void i2o_msg_nop(struct i2o_controller *c, u32 m)
* address from the read port (see the i2o spec). If no message is
 * available returns I2O_QUEUE_EMPTY and msg is left untouched.
*/
-u32 i2o_msg_get_wait(struct i2o_controller *c,
- struct i2o_message __iomem ** msg, int wait)
+struct i2o_message *i2o_msg_get_wait(struct i2o_controller *c, int wait)
{
unsigned long timeout = jiffies + wait * HZ;
- u32 m;
+ struct i2o_message *msg;
- while ((m = i2o_msg_get(c, msg)) == I2O_QUEUE_EMPTY) {
+ while (IS_ERR(msg = i2o_msg_get(c))) {
if (time_after(jiffies, timeout)) {
osm_debug("%s: Timeout waiting for message frame.\n",
c->name);
- return I2O_QUEUE_EMPTY;
+ return ERR_PTR(-ETIMEDOUT);
}
schedule_timeout_uninterruptible(1);
}
- return m;
+ return msg;
};
#if BITS_PER_LONG == 64
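
The reworked i2o_msg_get_wait() above folds the old I2O_QUEUE_EMPTY sentinel into the kernel's standard ERR_PTR convention: a negative errno travels inside the returned pointer itself. This is the stock <linux/err.h> mechanism, not something this patch adds; the example_* wrapper below just spells out how callers decode it:

#include <linux/err.h>

/* Sketch: decode the ERR_PTR-style return of the new i2o_msg_get_wait(). */
static int example_get_frame(struct i2o_controller *c,
			     struct i2o_message **out)
{
	struct i2o_message *msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);

	if (IS_ERR(msg))		/* true iff msg encodes an errno */
		return PTR_ERR(msg);	/* recover it, e.g. -ETIMEDOUT */

	*out = msg;			/* otherwise: a valid in-memory frame */
	return 0;
}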
@@ -301,8 +279,7 @@ struct i2o_device *i2o_iop_find_device(struct i2o_controller *c, u16 tid)
*/
static int i2o_iop_quiesce(struct i2o_controller *c)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
i2o_status_block *sb = c->status_block.virt;
int rc;
@@ -313,16 +290,17 @@ static int i2o_iop_quiesce(struct i2o_controller *c)
(sb->iop_state != ADAPTER_STATE_OPERATIONAL))
return 0;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
/* Long timeout needed for quiesce if lots of devices */
- if ((rc = i2o_msg_post_wait(c, m, 240)))
+ if ((rc = i2o_msg_post_wait(c, msg, 240)))
osm_info("%s: Unable to quiesce (status=%#x).\n", c->name, -rc);
else
osm_debug("%s: Quiesced.\n", c->name);
@@ -342,8 +320,7 @@ static int i2o_iop_quiesce(struct i2o_controller *c)
*/
static int i2o_iop_enable(struct i2o_controller *c)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
i2o_status_block *sb = c->status_block.virt;
int rc;
@@ -353,16 +330,17 @@ static int i2o_iop_enable(struct i2o_controller *c)
if (sb->iop_state != ADAPTER_STATE_READY)
return -EINVAL;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
/* How long of a timeout do we need? */
- if ((rc = i2o_msg_post_wait(c, m, 240)))
+ if ((rc = i2o_msg_post_wait(c, msg, 240)))
osm_err("%s: Could not enable (status=%#x).\n", c->name, -rc);
else
osm_debug("%s: Enabled.\n", c->name);
@@ -413,22 +391,22 @@ static inline void i2o_iop_enable_all(void)
*/
static int i2o_iop_clear(struct i2o_controller *c)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
int rc;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
/* Quiesce all IOPs first */
i2o_iop_quiesce_all();
- writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
+ msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
- if ((rc = i2o_msg_post_wait(c, m, 30)))
+ if ((rc = i2o_msg_post_wait(c, msg, 30)))
osm_info("%s: Unable to clear (status=%#x).\n", c->name, -rc);
else
osm_debug("%s: Cleared.\n", c->name);
@@ -446,13 +424,13 @@ static int i2o_iop_clear(struct i2o_controller *c)
* Clear and (re)initialize IOP's outbound queue and post the message
* frames to the IOP.
*
- * Returns 0 on success or a negative errno code on failure.
+ * Returns 0 on success or negative error code on failure.
*/
static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
{
- volatile u8 *status = c->status.virt;
u32 m;
- struct i2o_message __iomem *msg;
+ volatile u8 *status = c->status.virt;
+ struct i2o_message *msg;
ulong timeout;
int i;
@@ -460,23 +438,24 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
memset(c->status.virt, 0, 4);
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
-
- writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]);
- writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(i2o_exec_driver.context, &msg->u.s.icntxt);
- writel(0x00000000, &msg->u.s.tcntxt);
- writel(PAGE_SIZE, &msg->body[0]);
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(0x00000000);
+ msg->body[0] = cpu_to_le32(PAGE_SIZE);
/* Outbound msg frame size in words and Initcode */
- writel(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]);
- writel(0xd0000004, &msg->body[2]);
- writel(i2o_dma_low(c->status.phys), &msg->body[3]);
- writel(i2o_dma_high(c->status.phys), &msg->body[4]);
+ msg->body[1] = cpu_to_le32(I2O_OUTBOUND_MSG_FRAME_SIZE << 16 | 0x80);
+ msg->body[2] = cpu_to_le32(0xd0000004);
+ msg->body[3] = cpu_to_le32(i2o_dma_low(c->status.phys));
+ msg->body[4] = cpu_to_le32(i2o_dma_high(c->status.phys));
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ;
while (*status <= I2O_CMD_IN_PROGRESS) {
@@ -511,34 +490,34 @@ static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
static int i2o_iop_reset(struct i2o_controller *c)
{
volatile u8 *status = c->status.virt;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
unsigned long timeout;
i2o_status_block *sb = c->status_block.virt;
int rc = 0;
osm_debug("%s: Resetting controller\n", c->name);
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
memset(c->status_block.virt, 0, 8);
/* Quiesce all IOPs first */
i2o_iop_quiesce_all();
- writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(i2o_exec_driver.context, &msg->u.s.icntxt);
- writel(0, &msg->u.s.tcntxt); //FIXME: use reasonable transaction context
- writel(0, &msg->body[0]);
- writel(0, &msg->body[1]);
- writel(i2o_dma_low(c->status.phys), &msg->body[2]);
- writel(i2o_dma_high(c->status.phys), &msg->body[3]);
+ msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(0x00000000);
+ msg->body[0] = cpu_to_le32(0x00000000);
+ msg->body[1] = cpu_to_le32(0x00000000);
+ msg->body[2] = cpu_to_le32(i2o_dma_low(c->status.phys));
+ msg->body[3] = cpu_to_le32(i2o_dma_high(c->status.phys));
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
/* Wait for a reply */
timeout = jiffies + I2O_TIMEOUT_RESET * HZ;
@@ -567,18 +546,15 @@ static int i2o_iop_reset(struct i2o_controller *c)
osm_debug("%s: Reset in progress, waiting for reboot...\n",
c->name);
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
- while (m == I2O_QUEUE_EMPTY) {
+ while (IS_ERR(msg = i2o_msg_get_wait(c, I2O_TIMEOUT_RESET))) {
if (time_after(jiffies, timeout)) {
osm_err("%s: IOP reset timeout.\n", c->name);
- rc = -ETIMEDOUT;
+ rc = PTR_ERR(msg);
goto exit;
}
schedule_timeout_uninterruptible(1);
-
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
}
- i2o_msg_nop(c, m);
+ i2o_msg_nop(c, msg);
/* from here all quiesce commands are safe */
c->no_quiesce = 0;
@@ -686,8 +662,7 @@ static int i2o_iop_activate(struct i2o_controller *c)
*/
static int i2o_iop_systab_set(struct i2o_controller *c)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
i2o_status_block *sb = c->status_block.virt;
struct device *dev = &c->pdev->dev;
struct resource *root;
@@ -735,20 +710,21 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
}
}
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len,
PCI_DMA_TODEVICE);
if (!i2o_systab.phys) {
- i2o_msg_nop(c, m);
+ i2o_msg_nop(c, msg);
return -ENOMEM;
}
- writel(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6, &msg->u.head[0]);
- writel(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
+ msg->u.head[0] = cpu_to_le32(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
/*
* Provide three SGL-elements:
@@ -760,16 +736,16 @@ static int i2o_iop_systab_set(struct i2o_controller *c)
* same table to everyone. We have to go remap it for them all
*/
- writel(c->unit + 2, &msg->body[0]);
- writel(0, &msg->body[1]);
- writel(0x54000000 | i2o_systab.len, &msg->body[2]);
- writel(i2o_systab.phys, &msg->body[3]);
- writel(0x54000000 | sb->current_mem_size, &msg->body[4]);
- writel(sb->current_mem_base, &msg->body[5]);
- writel(0xd4000000 | sb->current_io_size, &msg->body[6]);
- writel(sb->current_io_base, &msg->body[6]);
+ msg->body[0] = cpu_to_le32(c->unit + 2);
+ msg->body[1] = cpu_to_le32(0x00000000);
+ msg->body[2] = cpu_to_le32(0x54000000 | i2o_systab.len);
+ msg->body[3] = cpu_to_le32(i2o_systab.phys);
+ msg->body[4] = cpu_to_le32(0x54000000 | sb->current_mem_size);
+ msg->body[5] = cpu_to_le32(sb->current_mem_base);
+ msg->body[6] = cpu_to_le32(0xd4000000 | sb->current_io_size);
+ msg->body[6] = cpu_to_le32(sb->current_io_base);
- rc = i2o_msg_post_wait(c, m, 120);
+ rc = i2o_msg_post_wait(c, msg, 120);
dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len,
PCI_DMA_TODEVICE);
@@ -952,30 +928,30 @@ static int i2o_parse_hrt(struct i2o_controller *c)
*/
int i2o_status_get(struct i2o_controller *c)
{
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
volatile u8 *status_block;
unsigned long timeout;
status_block = (u8 *) c->status_block.virt;
memset(c->status_block.virt, 0, sizeof(i2o_status_block));
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(i2o_exec_driver.context, &msg->u.s.icntxt);
- writel(0, &msg->u.s.tcntxt); // FIXME: use resonable transaction context
- writel(0, &msg->body[0]);
- writel(0, &msg->body[1]);
- writel(i2o_dma_low(c->status_block.phys), &msg->body[2]);
- writel(i2o_dma_high(c->status_block.phys), &msg->body[3]);
- writel(sizeof(i2o_status_block), &msg->body[4]); /* always 88 bytes */
+ msg->u.head[0] = cpu_to_le32(NINE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
+ msg->u.s.tcntxt = cpu_to_le32(0x00000000);
+ msg->body[0] = cpu_to_le32(0x00000000);
+ msg->body[1] = cpu_to_le32(0x00000000);
+ msg->body[2] = cpu_to_le32(i2o_dma_low(c->status_block.phys));
+ msg->body[3] = cpu_to_le32(i2o_dma_high(c->status_block.phys));
+ msg->body[4] = cpu_to_le32(sizeof(i2o_status_block)); /* always 88 bytes */
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
/* Wait for a reply */
timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ;
@@ -1013,20 +989,20 @@ static int i2o_hrt_get(struct i2o_controller *c)
struct device *dev = &c->pdev->dev;
for (i = 0; i < I2O_HRT_GET_TRIES; i++) {
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(SIX_WORD_MSG_SIZE | SGL_OFFSET_4, &msg->u.head[0]);
- writel(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | ADAPTER_TID,
- &msg->u.head[1]);
- writel(0xd0000000 | c->hrt.len, &msg->body[0]);
- writel(c->hrt.phys, &msg->body[1]);
+ msg->u.head[0] = cpu_to_le32(SIX_WORD_MSG_SIZE | SGL_OFFSET_4);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 |
+ ADAPTER_TID);
+ msg->body[0] = cpu_to_le32(0xd0000000 | c->hrt.len);
+ msg->body[1] = cpu_to_le32(c->hrt.phys);
- rc = i2o_msg_post_wait_mem(c, m, 20, &c->hrt);
+ rc = i2o_msg_post_wait_mem(c, msg, 20, &c->hrt);
if (rc < 0) {
osm_err("%s: Unable to get HRT (status=%#x)\n", c->name,
@@ -1056,6 +1032,7 @@ static int i2o_hrt_get(struct i2o_controller *c)
*/
void i2o_iop_free(struct i2o_controller *c)
{
+ i2o_pool_free(&c->in_msg);
kfree(c);
};
@@ -1080,7 +1057,7 @@ static struct class *i2o_controller_class;
* i2o_iop_alloc - Allocate and initialize a i2o_controller struct
*
* Allocate the necessary memory for a i2o_controller struct and
- * initialize the lists.
+ * initialize the lists and message mempool.
*
* Returns a pointer to the I2O controller or a negative error code on
* failure.
@@ -1089,6 +1066,7 @@ struct i2o_controller *i2o_iop_alloc(void)
{
static int unit = 0; /* 0 and 1 are NULL IOP and Local Host */
struct i2o_controller *c;
+ char poolname[32];
c = kmalloc(sizeof(*c), GFP_KERNEL);
if (!c) {
@@ -1098,11 +1076,20 @@ struct i2o_controller *i2o_iop_alloc(void)
}
memset(c, 0, sizeof(*c));
+ c->unit = unit++;
+ sprintf(c->name, "iop%d", c->unit);
+
+ snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name);
+ if (i2o_pool_alloc
+ (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4,
+ I2O_MSG_INPOOL_MIN)) {
+ kfree(c);
+ return ERR_PTR(-ENOMEM);
+ };
+
INIT_LIST_HEAD(&c->devices);
spin_lock_init(&c->lock);
init_MUTEX(&c->lct_lock);
- c->unit = unit++;
- sprintf(c->name, "iop%d", c->unit);
device_initialize(&c->device);
@@ -1199,28 +1186,27 @@ int i2o_iop_add(struct i2o_controller *c)
* is waited for, or expected. If you do not want further notifications,
* call the i2o_event_register again with a evt_mask of 0.
*
- * Returns 0 on success or -ETIMEDOUT if no message could be fetched for
- * sending the request.
+ * Returns 0 on success or negative error code on failure.
*/
int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv,
int tcntxt, u32 evt_mask)
{
struct i2o_controller *c = dev->iop;
- struct i2o_message __iomem *msg;
- u32 m;
+ struct i2o_message *msg;
- m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
- if (m == I2O_QUEUE_EMPTY)
- return -ETIMEDOUT;
+ msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
- writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
- writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->lct_data.
- tid, &msg->u.head[1]);
- writel(drv->context, &msg->u.s.icntxt);
- writel(tcntxt, &msg->u.s.tcntxt);
- writel(evt_mask, &msg->body[0]);
+ msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
+ msg->u.head[1] =
+ cpu_to_le32(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->
+ lct_data.tid);
+ msg->u.s.icntxt = cpu_to_le32(drv->context);
+ msg->u.s.tcntxt = cpu_to_le32(tcntxt);
+ msg->body[0] = cpu_to_le32(evt_mask);
- i2o_msg_post(c, m);
+ i2o_msg_post(c, msg);
return 0;
};
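
i2o_iop_alloc()/i2o_iop_free() above now create and tear down a per-controller inbound message pool (c->in_msg) via i2o_pool_alloc()/i2o_pool_free(); this pool is where the kernel-memory frames returned by i2o_msg_get() come from. The pool helpers themselves live in the include/linux/i2o.h changes, most of which fall outside the portion of the header hunk reproduced below, so the following is only a hedged sketch of what such a slab-backed mempool wrapper typically looks like — the struct layout, helper bodies and example_* names are assumptions; only the i2o_pool_alloc()/i2o_pool_free() call sites and their arguments appear in this diff:

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical pool wrapper: a slab cache of fixed-size message frames,
 * fronted by a mempool that guarantees a minimum reserve.
 */
struct example_msg_pool {
	char *name;
	kmem_cache_t *slab;
	mempool_t *mempool;
};

static int example_pool_alloc(struct example_msg_pool *pool, const char *name,
			      size_t size, int min_nr)
{
	pool->name = kstrdup(name, GFP_KERNEL);
	if (!pool->name)
		return -ENOMEM;

	pool->slab = kmem_cache_create(pool->name, size, 0,
				       SLAB_HWCACHE_ALIGN, NULL, NULL);
	if (!pool->slab)
		goto free_name;

	pool->mempool = mempool_create(min_nr, mempool_alloc_slab,
				       mempool_free_slab, pool->slab);
	if (!pool->mempool)
		goto free_slab;

	return 0;

      free_slab:
	kmem_cache_destroy(pool->slab);
      free_name:
	kfree(pool->name);
	return -ENOMEM;
}

static void example_pool_free(struct example_msg_pool *pool)
{
	mempool_destroy(pool->mempool);
	kmem_cache_destroy(pool->slab);
	kfree(pool->name);
}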
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
index ee7075f..329d482 100644
--- a/drivers/message/i2o/pci.c
+++ b/drivers/message/i2o/pci.c
@@ -483,4 +483,5 @@ void __exit i2o_pci_exit(void)
{
pci_unregister_driver(&i2o_pci_driver);
};
+
MODULE_DEVICE_TABLE(pci, i2o_pci_ids);
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index d79c8a4..9e359a9 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -30,6 +30,7 @@
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/workqueue.h> /* work_struct */
+#include <linux/mempool.h>
#include <asm/io.h>
#include <asm/semaphore.h> /* Needed for MUTEX init macros */
@@ -38,6 +39,355 @@
#define I2O_QUEUE_EMPTY 0xffffffff
/*
+ * Cache strategies
+ */
+
+/* The NULL strategy leaves everything up to the controller. This tends to be a
+ * pessimal but functional choice.
+ */
+#define CACHE_NULL 0
+/* Prefetch data when reading. We continually attempt to load the next 32 sectors
+ * into the controller cache.
+ */
+#define CACHE_PREFETCH 1
+/* Prefetch data when reading. We sometimes attempt to load the next 32 sectors
+ * into the controller cache. When an I/O is <= 8K we assume it is probably
+ * not sequential and don't prefetch (default).
+ */
+#define CACHE_SMARTFETCH 2
+/* Data is written to the cache and then out on to the disk. The I/O must be
+ * physically on the medium before the write is acknowledged (default without
+ * NVRAM)
+ */
+#define CACHE_WRITETHROUGH 17
+/* Data is written to the cache and then out on to the disk. The controller
+ * is permitted to write back the cache any way it wants. (default if battery
+ * backed NVRAM is present). It can be useful to set this for swap regardless of
+ * battery state.
+ */
+#define CACHE_WRITEBACK 18
+/* Optimise for under powered controllers, especially on RAID1 and RAID0. We
+ * write large I/O's directly to disk bypassing the cache to avoid the extra
+ * memory copy hits. Small writes are writeback cached
+ */
+#define CACHE_SMARTBACK 19
+/* Optimise for under powered controllers, especially on RAID1 and RAID0. We
+ * write large I/O's directly to disk bypassing the cache to avoid the extra
+ * memory copy hits. Small writes are writethrough cached. Suitable for devices
+ * lacking battery backup
+ */
+#define CACHE_SMARTTHROUGH 20
+
+/*
+ * Ioctl structures
+ */
+
+#define BLKI2OGRSTRAT _IOR('2', 1, int)
+#define BLKI2OGWSTRAT _IOR('2', 2, int)
+#define BLKI2OSRSTRAT _IOW('2', 3, int)
+#define BLKI2OSWSTRAT _IOW('2', 4, int)
+
+/*
+ * I2O Function codes
+ */
+
+/*
+ * Executive Class
+ */
+#define I2O_CMD_ADAPTER_ASSIGN 0xB3
+#define I2O_CMD_ADAPTER_READ 0xB2
+#define I2O_CMD_ADAPTER_RELEASE 0xB5
+#define I2O_CMD_BIOS_INFO_SET 0xA5
+#define I2O_CMD_BOOT_DEVICE_SET 0xA7
+#define I2O_CMD_CONFIG_VALIDATE 0xBB
+#define I2O_CMD_CONN_SETUP 0xCA
+#define I2O_CMD_DDM_DESTROY 0xB1
+#define I2O_CMD_DDM_ENABLE 0xD5
+#define I2O_CMD_DDM_QUIESCE 0xC7
+#define I2O_CMD_DDM_RESET 0xD9
+#define I2O_CMD_DDM_SUSPEND 0xAF
+#define I2O_CMD_DEVICE_ASSIGN 0xB7
+#define I2O_CMD_DEVICE_RELEASE 0xB9
+#define I2O_CMD_HRT_GET 0xA8
+#define I2O_CMD_ADAPTER_CLEAR 0xBE
+#define I2O_CMD_ADAPTER_CONNECT 0xC9
+#define I2O_CMD_ADAPTER_RESET 0xBD
+#define I2O_CMD_LCT_NOTIFY 0xA2
+#define I2O_CMD_OUTBOUND_INIT 0xA1
+#define I2O_CMD_PATH_ENABLE 0xD3
+#define I2O_CMD_PATH_QUIESCE 0xC5
+#define I2O_CMD_PATH_RESET 0xD7
+#define I2O_CMD_STATIC_MF_CREATE 0xDD
+#define I2O_CMD_STATIC_MF_RELEASE 0xDF
+#define I2O_CMD_STATUS_GET 0xA0
+#define I2O_CMD_SW_DOWNLOAD 0xA9
+#define I2O_CMD_SW_UPLOAD 0xAB
+#define I2O_CMD_SW_REMOVE 0xAD
+#define I2O_CMD_SYS_ENABLE 0xD1
+#define I2O_CMD_SYS_MODIFY 0xC1
+#define I2O_CMD_SYS_QUIESCE 0xC3
+#define I2O_CMD_SYS_TAB_SET 0xA3
+
+/*
+ * Utility Class
+ */
+#define I2O_CMD_UTIL_NOP 0x00
+#define I2O_CMD_UTIL_ABORT 0x01
+#define I2O_CMD_UTIL_CLAIM 0x09
+#define I2O_CMD_UTIL_RELEASE 0x0B
+#define I2O_CMD_UTIL_PARAMS_GET 0x06
+#define I2O_CMD_UTIL_PARAMS_SET 0x05
+#define I2O_CMD_UTIL_EVT_REGISTER 0x13
+#define I2O_CMD_UTIL_EVT_ACK 0x14
+#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10
+#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D
+#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F
+#define I2O_CMD_UTIL_LOCK 0x17
+#define I2O_CMD_UTIL_LOCK_RELEASE 0x19
+#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15
+
+/*
+ * SCSI Host Bus Adapter Class
+ */
+#define I2O_CMD_SCSI_EXEC 0x81
+#define I2O_CMD_SCSI_ABORT 0x83
+#define I2O_CMD_SCSI_BUSRESET 0x27
+
+/*
+ * Bus Adapter Class
+ */
+#define I2O_CMD_BUS_ADAPTER_RESET 0x85
+#define I2O_CMD_BUS_RESET 0x87
+#define I2O_CMD_BUS_SCAN 0x89
+#define I2O_CMD_BUS_QUIESCE 0x8b
+
+/*
+ * Random Block Storage Class
+ */
+#define I2O_CMD_BLOCK_READ 0x30
+#define I2O_CMD_BLOCK_WRITE 0x31
+#define I2O_CMD_BLOCK_CFLUSH 0x37
+#define I2O_CMD_BLOCK_MLOCK 0x49
+#define I2O_CMD_BLOCK_MUNLOCK 0x4B
+#define I2O_CMD_BLOCK_MMOUNT 0x41
+#define I2O_CMD_BLOCK_MEJECT 0x43
+#define I2O_CMD_BLOCK_POWER 0x70
+
+#define I2O_CMD_PRIVATE 0xFF
+
+/* Command status values */
+
+#define I2O_CMD_IN_PROGRESS 0x01
+#define I2O_CMD_REJECTED 0x02
+#define I2O_CMD_FAILED 0x03
+#define I2O_CMD_COMPLETED 0x04
+
+/* I2O API function return values */
+
+#define I2O_RTN_NO_ERROR 0
+#define I2O_RTN_NOT_INIT 1
+#define I2O_RTN_FREE_Q_EMPTY 2
+#define I2O_RTN_TCB_ERROR 3
+#define I2O_RTN_TRANSACTION_ERROR 4
+#define I2O_RTN_ADAPTER_ALREADY_INIT 5
+#define I2O_RTN_MALLOC_ERROR 6
+#define I2O_RTN_ADPTR_NOT_REGISTERED 7
+#define I2O_RTN_MSG_REPLY_TIMEOUT 8
+#define I2O_RTN_NO_STATUS 9
+#define I2O_RTN_NO_FIRM_VER 10
+#define I2O_RTN_NO_LINK_SPEED 11
+
+/* Reply message status defines for all messages */
+
+#define I2O_REPLY_STATUS_SUCCESS 0x00
+#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
+#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
+#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03
+#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04
+#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05
+#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06
+#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08
+#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09
+#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A
+#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B
+#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80
+
+/* Status codes and Error Information for Parameter functions */
+
+#define I2O_PARAMS_STATUS_SUCCESS 0x00
+#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01
+#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
+#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03
+#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04
+#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05
+#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06
+#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07
+#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08
+#define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09
+#define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A
+#define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B
+#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C
+#define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D
+#define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E
+#define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F
+#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10
+
+/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error
+ * messages: Table 3-2 Detailed Status Codes.*/
+
+#define I2O_DSC_SUCCESS 0x0000
+#define I2O_DSC_BAD_KEY 0x0002
+#define I2O_DSC_TCL_ERROR 0x0003
+#define I2O_DSC_REPLY_BUFFER_FULL 0x0004
+#define I2O_DSC_NO_SUCH_PAGE 0x0005
+#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006
+#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007
+#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009
+#define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A
+#define I2O_DSC_DEVICE_LOCKED 0x000B
+#define I2O_DSC_DEVICE_RESET 0x000C
+#define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D
+#define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E
+#define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F
+#define I2O_DSC_INVALID_OFFSET 0x0010
+#define I2O_DSC_INVALID_PARAMETER 0x0011
+#define I2O_DSC_INVALID_REQUEST 0x0012
+#define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013
+#define I2O_DSC_MESSAGE_TOO_LARGE 0x0014
+#define I2O_DSC_MESSAGE_TOO_SMALL 0x0015
+#define I2O_DSC_MISSING_PARAMETER 0x0016
+#define I2O_DSC_TIMEOUT 0x0017
+#define I2O_DSC_UNKNOWN_ERROR 0x0018
+#define I2O_DSC_UNKNOWN_FUNCTION 0x0019
+#define I2O_DSC_UNSUPPORTED_VERSION 0x001A
+#define I2O_DSC_DEVICE_BUSY 0x001B
+#define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C
+
+/* DetailedStatusCode defines for Block Storage Operation: Table 6-7 Detailed
+ Status Codes.*/
+
+#define I2O_BSA_DSC_SUCCESS 0x0000
+#define I2O_BSA_DSC_MEDIA_ERROR 0x0001
+#define I2O_BSA_DSC_ACCESS_ERROR 0x0002
+#define I2O_BSA_DSC_DEVICE_FAILURE 0x0003
+#define I2O_BSA_DSC_DEVICE_NOT_READY 0x0004
+#define I2O_BSA_DSC_MEDIA_NOT_PRESENT 0x0005
+#define I2O_BSA_DSC_MEDIA_LOCKED 0x0006
+#define I2O_BSA_DSC_MEDIA_FAILURE 0x0007
+#define I2O_BSA_DSC_PROTOCOL_FAILURE 0x0008
+#define I2O_BSA_DSC_BUS_FAILURE 0x0009
+#define I2O_BSA_DSC_ACCESS_VIOLATION 0x000A
+#define I2O_BSA_DSC_WRITE_PROTECTED 0x000B
+#define I2O_BSA_DSC_DEVICE_RESET 0x000C
+#define I2O_BSA_DSC_VOLUME_CHANGED 0x000D
+#define I2O_BSA_DSC_TIMEOUT 0x000E
+
+/* FailureStatusCodes, Table 3-3 Message Failure Codes */
+
+#define I2O_FSC_TRANSPORT_SERVICE_SUSPENDED 0x81
+#define I2O_FSC_TRANSPORT_SERVICE_TERMINATED 0x82
+#define I2O_FSC_TRANSPORT_CONGESTION 0x83
+#define I2O_FSC_TRANSPORT_FAILURE 0x84
+#define I2O_FSC_TRANSPORT_STATE_ERROR 0x85
+#define I2O_FSC_TRANSPORT_TIME_OUT 0x86
+#define I2O_FSC_TRANSPORT_ROUTING_FAILURE 0x87
+#define I2O_FSC_TRANSPORT_INVALID_VERSION 0x88
+#define I2O_FSC_TRANSPORT_INVALID_OFFSET 0x89
+#define I2O_FSC_TRANSPORT_INVALID_MSG_FLAGS 0x8A
+#define I2O_FSC_TRANSPORT_FRAME_TOO_SMALL 0x8B
+#define I2O_FSC_TRANSPORT_FRAME_TOO_LARGE 0x8C
+#define I2O_FSC_TRANSPORT_INVALID_TARGET_ID 0x8D
+#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_ID 0x8E
+#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_CONTEXT 0x8F
+#define I2O_FSC_TRANSPORT_UNKNOWN_FAILURE 0xFF
+
+/* Device Claim Types */
+#define I2O_CLAIM_PRIMARY 0x01000000
+#define I2O_CLAIM_MANAGEMENT 0x02000000
+#define I2O_CLAIM_AUTHORIZED 0x03000000
+#define I2O_CLAIM_SECONDARY 0x04000000
+
+/* Message header defines for VersionOffset */
+#define I2OVER15 0x0001
+#define I2OVER20 0x0002
+
+/* Default is 1.5 */
+#define I2OVERSION I2OVER15
+
+#define SGL_OFFSET_0 I2OVERSION
+#define SGL_OFFSET_4 (0x0040 | I2OVERSION)
+#define SGL_OFFSET_5 (0x0050 | I2OVERSION)
+#define SGL_OFFSET_6 (0x0060 | I2OVERSION)
+#define SGL_OFFSET_7 (0x0070 | I2OVERSION)
+#define SGL_OFFSET_8 (0x0080 | I2OVERSION)
+#define SGL_OFFSET_9 (0x0090 | I2OVERSION)
+#define SGL_OFFSET_10 (0x00A0 | I2OVERSION)
+#define SGL_OFFSET_11 (0x00B0 | I2OVERSION)
+#define SGL_OFFSET_12 (0x00C0 | I2OVERSION)
+#define SGL_OFFSET(x) (((x)<<4) | I2OVERSION)
+
+/* Transaction Reply Lists (TRL) Control Word structure */
+#define TRL_SINGLE_FIXED_LENGTH 0x00
+#define TRL_SINGLE_VARIABLE_LENGTH 0x40
+#define TRL_MULTIPLE_FIXED_LENGTH 0x80
+
+ /* msg header defines for MsgFlags */
+#define MSG_STATIC 0x0100
+#define MSG_64BIT_CNTXT 0x0200
+#define MSG_MULTI_TRANS 0x1000
+#define MSG_FAIL 0x2000
+#define MSG_FINAL 0x4000
+#define MSG_REPLY 0x8000
+
+ /* minimum size msg */
+#define THREE_WORD_MSG_SIZE 0x00030000
+#define FOUR_WORD_MSG_SIZE 0x00040000
+#define FIVE_WORD_MSG_SIZE 0x00050000
+#define SIX_WORD_MSG_SIZE 0x00060000
+#define SEVEN_WORD_MSG_SIZE 0x00070000
+#define EIGHT_WORD_MSG_SIZE 0x00080000
+#define NINE_WORD_MSG_SIZE 0x00090000
+#define TEN_WORD_MSG_SIZE 0x000A0000
+#define ELEVEN_WORD_MSG_SIZE 0x000B0000
+#define I2O_MESSAGE_SIZE(x) ((x)<<16)
+
+/* special TID assignments */
+#define ADAPTER_TID 0
+#define HOST_TID 1
+
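As a quick, hedged illustration of how the size and SGL-offset macros above compose the first two header words of a frame (the function code and TIDs below are placeholders, not taken from any driver in this patch):

	/* 11-word frame whose SGL starts at word 5, addressed to the IOP itself. */
	u32 head0 = I2O_MESSAGE_SIZE(11) | SGL_OFFSET(5);	/* == ELEVEN_WORD_MSG_SIZE | SGL_OFFSET_5 */
	u32 head1 = I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | ADAPTER_TID;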
+/* outbound queue defines */
+#define I2O_MAX_OUTBOUND_MSG_FRAMES 128
+#define I2O_OUTBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */
+
+/* inbound queue definitions */
+#define I2O_MSG_INPOOL_MIN 32
+#define I2O_INBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */
+
+#define I2O_POST_WAIT_OK 0
+#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT
+
+#define I2O_CONTEXT_LIST_MIN_LENGTH 15
+#define I2O_CONTEXT_LIST_USED 0x01
+#define I2O_CONTEXT_LIST_DELETED 0x02
+
+/* timeouts */
+#define I2O_TIMEOUT_INIT_OUTBOUND_QUEUE 15
+#define I2O_TIMEOUT_MESSAGE_GET 5
+#define I2O_TIMEOUT_RESET 30
+#define I2O_TIMEOUT_STATUS_GET 5
+#define I2O_TIMEOUT_LCT_GET 360
+#define I2O_TIMEOUT_SCSI_SCB_ABORT 240
+
+/* retries */
+#define I2O_HRT_GET_TRIES 3
+#define I2O_LCT_GET_TRIES 3
+
+/* defines for max_sectors and max_phys_segments */
+#define I2O_MAX_SECTORS 1024
+#define I2O_MAX_SECTORS_LIMITED 256
+#define I2O_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS
+
+/*
* Message structures
*/
struct i2o_message {
@@ -58,6 +408,12 @@ struct i2o_message {
u32 body[0];
};
+/* MFA and I2O message used by mempool */
+struct i2o_msg_mfa {
+ u32 mfa; /* MFA returned by the controller */
+ struct i2o_message msg; /* I2O message */
+};
+
/*
* Each I2O device entity has one of these. There is one per device.
*/
@@ -130,6 +486,15 @@ struct i2o_dma {
};
/*
+ * Contains slab cache and mempool information
+ */
+struct i2o_pool {
+ char *name;
+ kmem_cache_t *slab;
+ mempool_t *mempool;
+};
+
+/*
* Contains IO mapped address information
*/
struct i2o_io {
@@ -174,8 +539,6 @@ struct i2o_controller {
void __iomem *irq_status; /* Interrupt status register address */
void __iomem *irq_mask; /* Interrupt mask register address */
- /* Dynamic LCT related data */
-
struct i2o_dma status; /* IOP status block */
struct i2o_dma hrt; /* HW Resource Table */
@@ -188,6 +551,8 @@ struct i2o_controller {
struct i2o_io in_queue; /* inbound message queue Host->IOP */
struct i2o_dma out_queue; /* outbound message queue IOP->Host */
+ struct i2o_pool in_msg; /* mempool for inbound messages */
+
unsigned int battery:1; /* Has a battery backup */
unsigned int io_alloc:1; /* An I/O resource was allocated */
unsigned int mem_alloc:1; /* A memory resource was allocated */
@@ -247,16 +612,13 @@ struct i2o_sys_tbl {
extern struct list_head i2o_controllers;
/* Message functions */
-static inline u32 i2o_msg_get(struct i2o_controller *,
- struct i2o_message __iomem **);
-extern u32 i2o_msg_get_wait(struct i2o_controller *,
- struct i2o_message __iomem **, int);
-static inline void i2o_msg_post(struct i2o_controller *, u32);
-static inline int i2o_msg_post_wait(struct i2o_controller *, u32,
- unsigned long);
-extern int i2o_msg_post_wait_mem(struct i2o_controller *, u32, unsigned long,
- struct i2o_dma *);
-extern void i2o_msg_nop(struct i2o_controller *, u32);
+static inline struct i2o_message *i2o_msg_get(struct i2o_controller *);
+extern struct i2o_message *i2o_msg_get_wait(struct i2o_controller *, int);
+static inline void i2o_msg_post(struct i2o_controller *, struct i2o_message *);
+static inline int i2o_msg_post_wait(struct i2o_controller *,
+ struct i2o_message *, unsigned long);
+extern int i2o_msg_post_wait_mem(struct i2o_controller *, struct i2o_message *,
+ unsigned long, struct i2o_dma *);
static inline void i2o_flush_reply(struct i2o_controller *, u32);
/* IOP functions */
@@ -384,10 +746,10 @@ static inline u16 i2o_sg_tablesize(struct i2o_controller *c, u16 body_size)
static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
size_t size,
enum dma_data_direction direction,
- u32 __iomem ** sg_ptr)
+ u32 ** sg_ptr)
{
u32 sg_flags;
- u32 __iomem *mptr = *sg_ptr;
+ u32 *mptr = *sg_ptr;
dma_addr_t dma_addr;
switch (direction) {
@@ -405,16 +767,16 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
if (!dma_mapping_error(dma_addr)) {
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
- writel(0x7C020002, mptr++);
- writel(PAGE_SIZE, mptr++);
+ *mptr++ = cpu_to_le32(0x7C020002);
+ *mptr++ = cpu_to_le32(PAGE_SIZE);
}
#endif
- writel(sg_flags | size, mptr++);
- writel(i2o_dma_low(dma_addr), mptr++);
+ *mptr++ = cpu_to_le32(sg_flags | size);
+ *mptr++ = cpu_to_le32(i2o_dma_low(dma_addr));
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support)
- writel(i2o_dma_high(dma_addr), mptr++);
+ *mptr++ = cpu_to_le32(i2o_dma_high(dma_addr));
#endif
*sg_ptr = mptr;
}
@@ -439,10 +801,10 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
static inline int i2o_dma_map_sg(struct i2o_controller *c,
struct scatterlist *sg, int sg_count,
enum dma_data_direction direction,
- u32 __iomem ** sg_ptr)
+ u32 ** sg_ptr)
{
u32 sg_flags;
- u32 __iomem *mptr = *sg_ptr;
+ u32 *mptr = *sg_ptr;
switch (direction) {
case DMA_TO_DEVICE:
@@ -461,19 +823,19 @@ static inline int i2o_dma_map_sg(struct i2o_controller *c,
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
- writel(0x7C020002, mptr++);
- writel(PAGE_SIZE, mptr++);
+ *mptr++ = cpu_to_le32(0x7C020002);
+ *mptr++ = cpu_to_le32(PAGE_SIZE);
}
#endif
while (sg_count-- > 0) {
if (!sg_count)
sg_flags |= 0xC0000000;
- writel(sg_flags | sg_dma_len(sg), mptr++);
- writel(i2o_dma_low(sg_dma_address(sg)), mptr++);
+ *mptr++ = cpu_to_le32(sg_flags | sg_dma_len(sg));
+ *mptr++ = cpu_to_le32(i2o_dma_low(sg_dma_address(sg)));
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
if ((sizeof(dma_addr_t) > 4) && c->pae_support)
- writel(i2o_dma_high(sg_dma_address(sg)), mptr++);
+ *mptr++ = cpu_to_le32(i2o_dma_high(sg_dma_address(sg)));
#endif
sg++;
}
@@ -563,6 +925,64 @@ static inline int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr,
return 0;
};
+/*
+ * i2o_pool_alloc - Allocate a slab cache and mempool
+ * @pool: pointer to the struct i2o_pool to write data into.
+ * @name: name which is used to identify cache
+ * @size: size of each object
+ * @min_nr: minimum number of objects
+ *
+ * First allocates a slab cache with name and size. Then allocates a
+ * mempool which uses the slab cache for allocation and freeing.
+ *
+ * Returns 0 on success or negative error code on failure.
+ */
+static inline int i2o_pool_alloc(struct i2o_pool *pool, const char *name,
+ size_t size, int min_nr)
+{
+ pool->name = kmalloc(strlen(name) + 1, GFP_KERNEL);
+ if (!pool->name)
+ goto exit;
+ strcpy(pool->name, name);
+
+ pool->slab =
+ kmem_cache_create(pool->name, size, 0, SLAB_HWCACHE_ALIGN, NULL,
+ NULL);
+ if (!pool->slab)
+ goto free_name;
+
+ pool->mempool =
+ mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab,
+ pool->slab);
+ if (!pool->mempool)
+ goto free_slab;
+
+ return 0;
+
+ free_slab:
+ kmem_cache_destroy(pool->slab);
+
+ free_name:
+ kfree(pool->name);
+
+ exit:
+ return -ENOMEM;
+};
+
+/*
+ * i2o_pool_free - Free the slab cache and mempool
+ * @pool: pointer to the struct i2o_pool which should be freed
+ *
+ * Note that all objects must have been returned to the mempool before
+ * calling i2o_pool_free().
+ */
+static inline void i2o_pool_free(struct i2o_pool *pool)
+{
+ mempool_destroy(pool->mempool);
+ kmem_cache_destroy(pool->slab);
+ kfree(pool->name);
+};
+
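A hedged sketch of typical setup and teardown around i2o_pool_alloc()/i2o_pool_free(); the cache name and the wrapper functions are hypothetical, and the object size simply has to cover a struct i2o_msg_mfa (the MFA word plus one inbound frame):

	/* Hypothetical controller init: create the inbound-message mempool. */
	static int example_in_pool_setup(struct i2o_controller *c)
	{
		/* Frame size is given in 32-bit words, hence the * 4. */
		return i2o_pool_alloc(&c->in_msg, "i2o:in_msg",
				      sizeof(u32) +
				      I2O_INBOUND_MSG_FRAME_SIZE * 4,
				      I2O_MSG_INPOOL_MIN);
	}

	/* Hypothetical teardown: all frames must have been returned first. */
	static void example_in_pool_teardown(struct i2o_controller *c)
	{
		i2o_pool_free(&c->in_msg);
	}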
/* I2O driver (OSM) functions */
extern int i2o_driver_register(struct i2o_driver *);
extern void i2o_driver_unregister(struct i2o_driver *);
@@ -638,39 +1058,89 @@ extern int i2o_exec_lct_get(struct i2o_controller *);
#define kobj_to_i2o_device(kobj) to_i2o_device(container_of(kobj, struct device, kobj))
/**
+ * i2o_msg_out_to_virt - Turn an I2O message into a virtual address
+ * @c: controller
+ * @m: message engine value
+ *
+ * Turn a receive message from an I2O controller bus address into
+ * a Linux virtual address. The shared page frame is a linear block
+ * so we simply have to shift the offset. This function does not
+ * work for sender side messages as they are ioremap objects
+ * provided by the I2O controller.
+ */
+static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c,
+ u32 m)
+{
+ BUG_ON(m < c->out_queue.phys
+ || m >= c->out_queue.phys + c->out_queue.len);
+
+ return c->out_queue.virt + (m - c->out_queue.phys);
+};
+
+/**
+ * i2o_msg_in_to_virt - Turn an I2O message into a virtual address
+ * @c: controller
+ * @m: message engine value
+ *
+ * Turn a send message from an I2O controller bus address into
+ * a Linux virtual address. The shared page frame is a linear block
+ * so we simply have to shift the offset. This function does not
+ * work for receive side messages as they are kmalloc objects
+ * in a different pool.
+ */
+static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct
+ i2o_controller *c,
+ u32 m)
+{
+ return c->in_queue.virt + m;
+};
+
+/**
* i2o_msg_get - obtain an I2O message from the IOP
* @c: I2O controller
- * @msg: pointer to a I2O message pointer
*
- * This function tries to get a message slot. If no message slot is
+ * This function tries to get a message frame. If no message frame is
 * available, do not wait until one is available (see also i2o_msg_get_wait).
+ * The returned pointer to the message frame is not in I/O memory; it is
+ * allocated from a mempool. Because an MFA is also allocated from the
+ * controller, it is guaranteed that i2o_msg_post() will never fail.
*
- * On a success the message is returned and the pointer to the message is
- * set in msg. The returned message is the physical page frame offset
- * address from the read port (see the i2o spec). If no message is
- * available returns I2O_QUEUE_EMPTY and msg is leaved untouched.
+ * On success a pointer to the message frame is returned. If the message
+ * queue is empty, -EBUSY is returned; if no memory is available, -ENOMEM
+ * is returned.
*/
-static inline u32 i2o_msg_get(struct i2o_controller *c,
- struct i2o_message __iomem ** msg)
+static inline struct i2o_message *i2o_msg_get(struct i2o_controller *c)
{
- u32 m = readl(c->in_port);
-
- if (m != I2O_QUEUE_EMPTY)
- *msg = c->in_queue.virt + m;
+ struct i2o_msg_mfa *mmsg = mempool_alloc(c->in_msg.mempool, GFP_ATOMIC);
+ if (!mmsg)
+ return ERR_PTR(-ENOMEM);
+
+ mmsg->mfa = readl(c->in_port);
+ if (mmsg->mfa == I2O_QUEUE_EMPTY) {
+ mempool_free(mmsg, c->in_msg.mempool);
+ return ERR_PTR(-EBUSY);
+ }
- return m;
+ return &mmsg->msg;
};
/**
* i2o_msg_post - Post I2O message to I2O controller
* @c: I2O controller to which the message should be send
- * @m: the message identifier
+ * @msg: message returned by i2o_msg_get()
*
- * Post the message to the I2O controller.
+ * Post the message to the I2O controller and return immediately.
*/
-static inline void i2o_msg_post(struct i2o_controller *c, u32 m)
+static inline void i2o_msg_post(struct i2o_controller *c,
+ struct i2o_message *msg)
{
- writel(m, c->in_port);
+ struct i2o_msg_mfa *mmsg;
+
+ mmsg = container_of(msg, struct i2o_msg_mfa, msg);
+ memcpy_toio(i2o_msg_in_to_virt(c, mmsg->mfa), msg,
+ (le32_to_cpu(msg->u.head[0]) >> 16) << 2);
+ writel(mmsg->mfa, c->in_port);
+ mempool_free(mmsg, c->in_msg.mempool);
};
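Taken together, i2o_msg_get() and i2o_msg_post() replace the old MFA-based interface. A minimal, hedged sketch of the non-blocking path as a caller might use it; the wrapper function is hypothetical and the NOP frame mirrors the one built in i2o_msg_nop_mfa() below:

	static int example_send_nop(struct i2o_controller *c)
	{
		struct i2o_message *msg;

		msg = i2o_msg_get(c);	/* ERR_PTR(-EBUSY) or ERR_PTR(-ENOMEM) on failure */
		if (IS_ERR(msg))
			return PTR_ERR(msg);

		msg->u.head[0] = cpu_to_le32(THREE_WORD_MSG_SIZE | SGL_OFFSET_0);
		msg->u.head[1] = cpu_to_le32(I2O_CMD_UTIL_NOP << 24 |
					     HOST_TID << 12 | ADAPTER_TID);
		msg->u.head[2] = cpu_to_le32(0x00000000);

		i2o_msg_post(c, msg);	/* copies the frame to its MFA and frees it */
		return 0;
	}

A frame that is fetched but cannot be sent must be returned with i2o_msg_nop() (see the sketch after that function below); for synchronous requests the filled frame is handed to i2o_msg_post_wait() instead of i2o_msg_post().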
/**
@@ -685,62 +1155,66 @@ static inline void i2o_msg_post(struct i2o_controller *c, u32 m)
*
* Returns 0 on success or negative error code on failure.
*/
-static inline int i2o_msg_post_wait(struct i2o_controller *c, u32 m,
+static inline int i2o_msg_post_wait(struct i2o_controller *c,
+ struct i2o_message *msg,
unsigned long timeout)
{
- return i2o_msg_post_wait_mem(c, m, timeout, NULL);
+ return i2o_msg_post_wait_mem(c, msg, timeout, NULL);
};
/**
- * i2o_flush_reply - Flush reply from I2O controller
- * @c: I2O controller
- * @m: the message identifier
+ * i2o_msg_nop_mfa - Returns a fetched MFA back to the controller
+ * @c: I2O controller from which the MFA was fetched
+ * @mfa: MFA which should be returned
*
- * The I2O controller must be informed that the reply message is not needed
- * anymore. If you forget to flush the reply, the message frame can't be
- * used by the controller anymore and is therefore lost.
+ * This function must be used for preserved messages, because i2o_msg_nop()
+ * also returns the allocated memory to the controller's in_msg mempool.
*/
-static inline void i2o_flush_reply(struct i2o_controller *c, u32 m)
+static inline void i2o_msg_nop_mfa(struct i2o_controller *c, u32 mfa)
{
- writel(m, c->out_port);
+ struct i2o_message __iomem *msg;
+ u32 nop[3] = {
+ THREE_WORD_MSG_SIZE | SGL_OFFSET_0,
+ I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
+ 0x00000000
+ };
+
+ msg = i2o_msg_in_to_virt(c, mfa);
+ memcpy_toio(msg, nop, sizeof(nop));
+ writel(mfa, c->in_port);
};
/**
- * i2o_out_to_virt - Turn an I2O message to a virtual address
- * @c: controller
- * @m: message engine value
+ * i2o_msg_nop - Returns a message which is not used
+ * @c: I2O controller from which the message was created
+ * @msg: message which should be returned
*
- * Turn a receive message from an I2O controller bus address into
- * a Linux virtual address. The shared page frame is a linear block
- * so we simply have to shift the offset. This function does not
- * work for sender side messages as they are ioremap objects
- * provided by the I2O controller.
+ * If you fetch a message via i2o_msg_get() and can't use it, you must
+ * return the message with this function. Otherwise the MFA is lost, as is
+ * the memory allocated from the mempool.
*/
-static inline struct i2o_message *i2o_msg_out_to_virt(struct i2o_controller *c,
- u32 m)
+static inline void i2o_msg_nop(struct i2o_controller *c,
+ struct i2o_message *msg)
{
- BUG_ON(m < c->out_queue.phys
- || m >= c->out_queue.phys + c->out_queue.len);
+ struct i2o_msg_mfa *mmsg;
+ mmsg = container_of(msg, struct i2o_msg_mfa, msg);
- return c->out_queue.virt + (m - c->out_queue.phys);
+ i2o_msg_nop_mfa(c, mmsg->mfa);
+ mempool_free(mmsg, c->in_msg.mempool);
};
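A short, hedged sketch of the bail-out path the comment above describes; the wrapper function is hypothetical:

	static void example_discard_frame(struct i2o_controller *c)
	{
		struct i2o_message *msg = i2o_msg_get(c);

		/* NOP the fetched MFA and give the mempool object back. */
		if (!IS_ERR(msg))
			i2o_msg_nop(c, msg);
	}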
/**
- * i2o_msg_in_to_virt - Turn an I2O message to a virtual address
- * @c: controller
- * @m: message engine value
+ * i2o_flush_reply - Flush reply from I2O controller
+ * @c: I2O controller
+ * @m: the message identifier
*
- * Turn a send message from an I2O controller bus address into
- * a Linux virtual address. The shared page frame is a linear block
- * so we simply have to shift the offset. This function does not
- * work for receive side messages as they are kmalloc objects
- * in a different pool.
+ * The I2O controller must be informed that the reply message is not needed
+ * anymore. If you forget to flush the reply, the message frame can't be
+ * used by the controller anymore and is therefore lost.
*/
-static inline struct i2o_message __iomem *i2o_msg_in_to_virt(struct
- i2o_controller *c,
- u32 m)
+static inline void i2o_flush_reply(struct i2o_controller *c, u32 m)
{
- return c->in_queue.virt + m;
+ writel(m, c->out_port);
};
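On the reply side the MFA is only an offset into the outbound queue. A hedged sketch of how a reply handler might translate and release it; obtaining the MFA itself (normally from the controller's outbound post queue) is outside this header and is assumed here:

	static void example_handle_reply(struct i2o_controller *c, u32 mfa)
	{
		struct i2o_message *reply = i2o_msg_out_to_virt(c, mfa);

		/* ... inspect reply->u.head[] / reply->body[] here ... */
		(void)reply;

		/* Hand the outbound frame back so the IOP can reuse it. */
		i2o_flush_reply(c, mfa);
	}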
/*
@@ -779,350 +1253,5 @@ extern void i2o_dump_message(struct i2o_message *);
extern void i2o_dump_hrt(struct i2o_controller *c);
extern void i2o_debug_state(struct i2o_controller *c);
-/*
- * Cache strategies
- */
-
-/* The NULL strategy leaves everything up to the controller. This tends to be a
- * pessimal but functional choice.
- */
-#define CACHE_NULL 0
-/* Prefetch data when reading. We continually attempt to load the next 32 sectors
- * into the controller cache.
- */
-#define CACHE_PREFETCH 1
-/* Prefetch data when reading. We sometimes attempt to load the next 32 sectors
- * into the controller cache. When an I/O is less <= 8K we assume its probably
- * not sequential and don't prefetch (default)
- */
-#define CACHE_SMARTFETCH 2
-/* Data is written to the cache and then out on to the disk. The I/O must be
- * physically on the medium before the write is acknowledged (default without
- * NVRAM)
- */
-#define CACHE_WRITETHROUGH 17
-/* Data is written to the cache and then out on to the disk. The controller
- * is permitted to write back the cache any way it wants. (default if battery
- * backed NVRAM is present). It can be useful to set this for swap regardless of
- * battery state.
- */
-#define CACHE_WRITEBACK 18
-/* Optimise for under powered controllers, especially on RAID1 and RAID0. We
- * write large I/O's directly to disk bypassing the cache to avoid the extra
- * memory copy hits. Small writes are writeback cached
- */
-#define CACHE_SMARTBACK 19
-/* Optimise for under powered controllers, especially on RAID1 and RAID0. We
- * write large I/O's directly to disk bypassing the cache to avoid the extra
- * memory copy hits. Small writes are writethrough cached. Suitable for devices
- * lacking battery backup
- */
-#define CACHE_SMARTTHROUGH 20
-
-/*
- * Ioctl structures
- */
-
-#define BLKI2OGRSTRAT _IOR('2', 1, int)
-#define BLKI2OGWSTRAT _IOR('2', 2, int)
-#define BLKI2OSRSTRAT _IOW('2', 3, int)
-#define BLKI2OSWSTRAT _IOW('2', 4, int)
-
-/*
- * I2O Function codes
- */
-
-/*
- * Executive Class
- */
-#define I2O_CMD_ADAPTER_ASSIGN 0xB3
-#define I2O_CMD_ADAPTER_READ 0xB2
-#define I2O_CMD_ADAPTER_RELEASE 0xB5
-#define I2O_CMD_BIOS_INFO_SET 0xA5
-#define I2O_CMD_BOOT_DEVICE_SET 0xA7
-#define I2O_CMD_CONFIG_VALIDATE 0xBB
-#define I2O_CMD_CONN_SETUP 0xCA
-#define I2O_CMD_DDM_DESTROY 0xB1
-#define I2O_CMD_DDM_ENABLE 0xD5
-#define I2O_CMD_DDM_QUIESCE 0xC7
-#define I2O_CMD_DDM_RESET 0xD9
-#define I2O_CMD_DDM_SUSPEND 0xAF
-#define I2O_CMD_DEVICE_ASSIGN 0xB7
-#define I2O_CMD_DEVICE_RELEASE 0xB9
-#define I2O_CMD_HRT_GET 0xA8
-#define I2O_CMD_ADAPTER_CLEAR 0xBE
-#define I2O_CMD_ADAPTER_CONNECT 0xC9
-#define I2O_CMD_ADAPTER_RESET 0xBD
-#define I2O_CMD_LCT_NOTIFY 0xA2
-#define I2O_CMD_OUTBOUND_INIT 0xA1
-#define I2O_CMD_PATH_ENABLE 0xD3
-#define I2O_CMD_PATH_QUIESCE 0xC5
-#define I2O_CMD_PATH_RESET 0xD7
-#define I2O_CMD_STATIC_MF_CREATE 0xDD
-#define I2O_CMD_STATIC_MF_RELEASE 0xDF
-#define I2O_CMD_STATUS_GET 0xA0
-#define I2O_CMD_SW_DOWNLOAD 0xA9
-#define I2O_CMD_SW_UPLOAD 0xAB
-#define I2O_CMD_SW_REMOVE 0xAD
-#define I2O_CMD_SYS_ENABLE 0xD1
-#define I2O_CMD_SYS_MODIFY 0xC1
-#define I2O_CMD_SYS_QUIESCE 0xC3
-#define I2O_CMD_SYS_TAB_SET 0xA3
-
-/*
- * Utility Class
- */
-#define I2O_CMD_UTIL_NOP 0x00
-#define I2O_CMD_UTIL_ABORT 0x01
-#define I2O_CMD_UTIL_CLAIM 0x09
-#define I2O_CMD_UTIL_RELEASE 0x0B
-#define I2O_CMD_UTIL_PARAMS_GET 0x06
-#define I2O_CMD_UTIL_PARAMS_SET 0x05
-#define I2O_CMD_UTIL_EVT_REGISTER 0x13
-#define I2O_CMD_UTIL_EVT_ACK 0x14
-#define I2O_CMD_UTIL_CONFIG_DIALOG 0x10
-#define I2O_CMD_UTIL_DEVICE_RESERVE 0x0D
-#define I2O_CMD_UTIL_DEVICE_RELEASE 0x0F
-#define I2O_CMD_UTIL_LOCK 0x17
-#define I2O_CMD_UTIL_LOCK_RELEASE 0x19
-#define I2O_CMD_UTIL_REPLY_FAULT_NOTIFY 0x15
-
-/*
- * SCSI Host Bus Adapter Class
- */
-#define I2O_CMD_SCSI_EXEC 0x81
-#define I2O_CMD_SCSI_ABORT 0x83
-#define I2O_CMD_SCSI_BUSRESET 0x27
-
-/*
- * Bus Adapter Class
- */
-#define I2O_CMD_BUS_ADAPTER_RESET 0x85
-#define I2O_CMD_BUS_RESET 0x87
-#define I2O_CMD_BUS_SCAN 0x89
-#define I2O_CMD_BUS_QUIESCE 0x8b
-
-/*
- * Random Block Storage Class
- */
-#define I2O_CMD_BLOCK_READ 0x30
-#define I2O_CMD_BLOCK_WRITE 0x31
-#define I2O_CMD_BLOCK_CFLUSH 0x37
-#define I2O_CMD_BLOCK_MLOCK 0x49
-#define I2O_CMD_BLOCK_MUNLOCK 0x4B
-#define I2O_CMD_BLOCK_MMOUNT 0x41
-#define I2O_CMD_BLOCK_MEJECT 0x43
-#define I2O_CMD_BLOCK_POWER 0x70
-
-#define I2O_CMD_PRIVATE 0xFF
-
-/* Command status values */
-
-#define I2O_CMD_IN_PROGRESS 0x01
-#define I2O_CMD_REJECTED 0x02
-#define I2O_CMD_FAILED 0x03
-#define I2O_CMD_COMPLETED 0x04
-
-/* I2O API function return values */
-
-#define I2O_RTN_NO_ERROR 0
-#define I2O_RTN_NOT_INIT 1
-#define I2O_RTN_FREE_Q_EMPTY 2
-#define I2O_RTN_TCB_ERROR 3
-#define I2O_RTN_TRANSACTION_ERROR 4
-#define I2O_RTN_ADAPTER_ALREADY_INIT 5
-#define I2O_RTN_MALLOC_ERROR 6
-#define I2O_RTN_ADPTR_NOT_REGISTERED 7
-#define I2O_RTN_MSG_REPLY_TIMEOUT 8
-#define I2O_RTN_NO_STATUS 9
-#define I2O_RTN_NO_FIRM_VER 10
-#define I2O_RTN_NO_LINK_SPEED 11
-
-/* Reply message status defines for all messages */
-
-#define I2O_REPLY_STATUS_SUCCESS 0x00
-#define I2O_REPLY_STATUS_ABORT_DIRTY 0x01
-#define I2O_REPLY_STATUS_ABORT_NO_DATA_TRANSFER 0x02
-#define I2O_REPLY_STATUS_ABORT_PARTIAL_TRANSFER 0x03
-#define I2O_REPLY_STATUS_ERROR_DIRTY 0x04
-#define I2O_REPLY_STATUS_ERROR_NO_DATA_TRANSFER 0x05
-#define I2O_REPLY_STATUS_ERROR_PARTIAL_TRANSFER 0x06
-#define I2O_REPLY_STATUS_PROCESS_ABORT_DIRTY 0x08
-#define I2O_REPLY_STATUS_PROCESS_ABORT_NO_DATA_TRANSFER 0x09
-#define I2O_REPLY_STATUS_PROCESS_ABORT_PARTIAL_TRANSFER 0x0A
-#define I2O_REPLY_STATUS_TRANSACTION_ERROR 0x0B
-#define I2O_REPLY_STATUS_PROGRESS_REPORT 0x80
-
-/* Status codes and Error Information for Parameter functions */
-
-#define I2O_PARAMS_STATUS_SUCCESS 0x00
-#define I2O_PARAMS_STATUS_BAD_KEY_ABORT 0x01
-#define I2O_PARAMS_STATUS_BAD_KEY_CONTINUE 0x02
-#define I2O_PARAMS_STATUS_BUFFER_FULL 0x03
-#define I2O_PARAMS_STATUS_BUFFER_TOO_SMALL 0x04
-#define I2O_PARAMS_STATUS_FIELD_UNREADABLE 0x05
-#define I2O_PARAMS_STATUS_FIELD_UNWRITEABLE 0x06
-#define I2O_PARAMS_STATUS_INSUFFICIENT_FIELDS 0x07
-#define I2O_PARAMS_STATUS_INVALID_GROUP_ID 0x08
-#define I2O_PARAMS_STATUS_INVALID_OPERATION 0x09
-#define I2O_PARAMS_STATUS_NO_KEY_FIELD 0x0A
-#define I2O_PARAMS_STATUS_NO_SUCH_FIELD 0x0B
-#define I2O_PARAMS_STATUS_NON_DYNAMIC_GROUP 0x0C
-#define I2O_PARAMS_STATUS_OPERATION_ERROR 0x0D
-#define I2O_PARAMS_STATUS_SCALAR_ERROR 0x0E
-#define I2O_PARAMS_STATUS_TABLE_ERROR 0x0F
-#define I2O_PARAMS_STATUS_WRONG_GROUP_TYPE 0x10
-
-/* DetailedStatusCode defines for Executive, DDM, Util and Transaction error
- * messages: Table 3-2 Detailed Status Codes.*/
-
-#define I2O_DSC_SUCCESS 0x0000
-#define I2O_DSC_BAD_KEY 0x0002
-#define I2O_DSC_TCL_ERROR 0x0003
-#define I2O_DSC_REPLY_BUFFER_FULL 0x0004
-#define I2O_DSC_NO_SUCH_PAGE 0x0005
-#define I2O_DSC_INSUFFICIENT_RESOURCE_SOFT 0x0006
-#define I2O_DSC_INSUFFICIENT_RESOURCE_HARD 0x0007
-#define I2O_DSC_CHAIN_BUFFER_TOO_LARGE 0x0009
-#define I2O_DSC_UNSUPPORTED_FUNCTION 0x000A
-#define I2O_DSC_DEVICE_LOCKED 0x000B
-#define I2O_DSC_DEVICE_RESET 0x000C
-#define I2O_DSC_INAPPROPRIATE_FUNCTION 0x000D
-#define I2O_DSC_INVALID_INITIATOR_ADDRESS 0x000E
-#define I2O_DSC_INVALID_MESSAGE_FLAGS 0x000F
-#define I2O_DSC_INVALID_OFFSET 0x0010
-#define I2O_DSC_INVALID_PARAMETER 0x0011
-#define I2O_DSC_INVALID_REQUEST 0x0012
-#define I2O_DSC_INVALID_TARGET_ADDRESS 0x0013
-#define I2O_DSC_MESSAGE_TOO_LARGE 0x0014
-#define I2O_DSC_MESSAGE_TOO_SMALL 0x0015
-#define I2O_DSC_MISSING_PARAMETER 0x0016
-#define I2O_DSC_TIMEOUT 0x0017
-#define I2O_DSC_UNKNOWN_ERROR 0x0018
-#define I2O_DSC_UNKNOWN_FUNCTION 0x0019
-#define I2O_DSC_UNSUPPORTED_VERSION 0x001A
-#define I2O_DSC_DEVICE_BUSY 0x001B
-#define I2O_DSC_DEVICE_NOT_AVAILABLE 0x001C
-
-/* DetailedStatusCode defines for Block Storage Operation: Table 6-7 Detailed
- Status Codes.*/
-
-#define I2O_BSA_DSC_SUCCESS 0x0000
-#define I2O_BSA_DSC_MEDIA_ERROR 0x0001
-#define I2O_BSA_DSC_ACCESS_ERROR 0x0002
-#define I2O_BSA_DSC_DEVICE_FAILURE 0x0003
-#define I2O_BSA_DSC_DEVICE_NOT_READY 0x0004
-#define I2O_BSA_DSC_MEDIA_NOT_PRESENT 0x0005
-#define I2O_BSA_DSC_MEDIA_LOCKED 0x0006
-#define I2O_BSA_DSC_MEDIA_FAILURE 0x0007
-#define I2O_BSA_DSC_PROTOCOL_FAILURE 0x0008
-#define I2O_BSA_DSC_BUS_FAILURE 0x0009
-#define I2O_BSA_DSC_ACCESS_VIOLATION 0x000A
-#define I2O_BSA_DSC_WRITE_PROTECTED 0x000B
-#define I2O_BSA_DSC_DEVICE_RESET 0x000C
-#define I2O_BSA_DSC_VOLUME_CHANGED 0x000D
-#define I2O_BSA_DSC_TIMEOUT 0x000E
-
-/* FailureStatusCodes, Table 3-3 Message Failure Codes */
-
-#define I2O_FSC_TRANSPORT_SERVICE_SUSPENDED 0x81
-#define I2O_FSC_TRANSPORT_SERVICE_TERMINATED 0x82
-#define I2O_FSC_TRANSPORT_CONGESTION 0x83
-#define I2O_FSC_TRANSPORT_FAILURE 0x84
-#define I2O_FSC_TRANSPORT_STATE_ERROR 0x85
-#define I2O_FSC_TRANSPORT_TIME_OUT 0x86
-#define I2O_FSC_TRANSPORT_ROUTING_FAILURE 0x87
-#define I2O_FSC_TRANSPORT_INVALID_VERSION 0x88
-#define I2O_FSC_TRANSPORT_INVALID_OFFSET 0x89
-#define I2O_FSC_TRANSPORT_INVALID_MSG_FLAGS 0x8A
-#define I2O_FSC_TRANSPORT_FRAME_TOO_SMALL 0x8B
-#define I2O_FSC_TRANSPORT_FRAME_TOO_LARGE 0x8C
-#define I2O_FSC_TRANSPORT_INVALID_TARGET_ID 0x8D
-#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_ID 0x8E
-#define I2O_FSC_TRANSPORT_INVALID_INITIATOR_CONTEXT 0x8F
-#define I2O_FSC_TRANSPORT_UNKNOWN_FAILURE 0xFF
-
-/* Device Claim Types */
-#define I2O_CLAIM_PRIMARY 0x01000000
-#define I2O_CLAIM_MANAGEMENT 0x02000000
-#define I2O_CLAIM_AUTHORIZED 0x03000000
-#define I2O_CLAIM_SECONDARY 0x04000000
-
-/* Message header defines for VersionOffset */
-#define I2OVER15 0x0001
-#define I2OVER20 0x0002
-
-/* Default is 1.5 */
-#define I2OVERSION I2OVER15
-
-#define SGL_OFFSET_0 I2OVERSION
-#define SGL_OFFSET_4 (0x0040 | I2OVERSION)
-#define SGL_OFFSET_5 (0x0050 | I2OVERSION)
-#define SGL_OFFSET_6 (0x0060 | I2OVERSION)
-#define SGL_OFFSET_7 (0x0070 | I2OVERSION)
-#define SGL_OFFSET_8 (0x0080 | I2OVERSION)
-#define SGL_OFFSET_9 (0x0090 | I2OVERSION)
-#define SGL_OFFSET_10 (0x00A0 | I2OVERSION)
-#define SGL_OFFSET_11 (0x00B0 | I2OVERSION)
-#define SGL_OFFSET_12 (0x00C0 | I2OVERSION)
-#define SGL_OFFSET(x) (((x)<<4) | I2OVERSION)
-
-/* Transaction Reply Lists (TRL) Control Word structure */
-#define TRL_SINGLE_FIXED_LENGTH 0x00
-#define TRL_SINGLE_VARIABLE_LENGTH 0x40
-#define TRL_MULTIPLE_FIXED_LENGTH 0x80
-
- /* msg header defines for MsgFlags */
-#define MSG_STATIC 0x0100
-#define MSG_64BIT_CNTXT 0x0200
-#define MSG_MULTI_TRANS 0x1000
-#define MSG_FAIL 0x2000
-#define MSG_FINAL 0x4000
-#define MSG_REPLY 0x8000
-
- /* minimum size msg */
-#define THREE_WORD_MSG_SIZE 0x00030000
-#define FOUR_WORD_MSG_SIZE 0x00040000
-#define FIVE_WORD_MSG_SIZE 0x00050000
-#define SIX_WORD_MSG_SIZE 0x00060000
-#define SEVEN_WORD_MSG_SIZE 0x00070000
-#define EIGHT_WORD_MSG_SIZE 0x00080000
-#define NINE_WORD_MSG_SIZE 0x00090000
-#define TEN_WORD_MSG_SIZE 0x000A0000
-#define ELEVEN_WORD_MSG_SIZE 0x000B0000
-#define I2O_MESSAGE_SIZE(x) ((x)<<16)
-
-/* special TID assignments */
-#define ADAPTER_TID 0
-#define HOST_TID 1
-
-/* outbound queue defines */
-#define I2O_MAX_OUTBOUND_MSG_FRAMES 128
-#define I2O_OUTBOUND_MSG_FRAME_SIZE 128 /* in 32-bit words */
-
-#define I2O_POST_WAIT_OK 0
-#define I2O_POST_WAIT_TIMEOUT -ETIMEDOUT
-
-#define I2O_CONTEXT_LIST_MIN_LENGTH 15
-#define I2O_CONTEXT_LIST_USED 0x01
-#define I2O_CONTEXT_LIST_DELETED 0x02
-
-/* timeouts */
-#define I2O_TIMEOUT_INIT_OUTBOUND_QUEUE 15
-#define I2O_TIMEOUT_MESSAGE_GET 5
-#define I2O_TIMEOUT_RESET 30
-#define I2O_TIMEOUT_STATUS_GET 5
-#define I2O_TIMEOUT_LCT_GET 360
-#define I2O_TIMEOUT_SCSI_SCB_ABORT 240
-
-/* retries */
-#define I2O_HRT_GET_TRIES 3
-#define I2O_LCT_GET_TRIES 3
-
-/* defines for max_sectors and max_phys_segments */
-#define I2O_MAX_SECTORS 1024
-#define I2O_MAX_SECTORS_LIMITED 256
-#define I2O_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS
-
#endif /* __KERNEL__ */
#endif /* _I2O_H */