author     Wolfgang Wiedmeyer <wolfgit@wiedmeyer.de>    2015-10-23 03:29:33 +0200
committer  Wolfgang Wiedmeyer <wolfgit@wiedmeyer.de>    2015-10-23 03:29:33 +0200
commit     15dfd0df63ce6847081d09b2bbd567cc0cc4eae1 (patch)
tree       3b73f24fcef970bfcace3cbb297cfa57f3994682 /drivers/scsi
parent     328aa7a45af61bc0060c80847daa67fef7b9c0d0 (diff)
parent     0149138c4142da287d23f9d5c6038f7fb5e30ac2 (diff)
initial merge with 3.2.72
Diffstat (limited to 'drivers/scsi')
-rw-r--r--   drivers/scsi/bfa/bfad_bsg.c      3235
-rw-r--r--   drivers/scsi/bfa/bfad_bsg.h       746
-rw-r--r--   drivers/scsi/bfa/bfi_reg.h        450
-rw-r--r--   drivers/scsi/mvumi.c             2018
-rw-r--r--   drivers/scsi/mvumi.h              505
-rw-r--r--   drivers/scsi/qla4xxx/ql4_bsg.c    513
-rw-r--r--   drivers/scsi/qla4xxx/ql4_bsg.h     19
7 files changed, 7486 insertions(+), 0 deletions(-)
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
new file mode 100644
index 0000000..06fc00c
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -0,0 +1,3235 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/uaccess.h>
+#include "bfad_drv.h"
+#include "bfad_im.h"
+#include "bfad_bsg.h"
+
+BFA_TRC_FILE(LDRV, BSG);
+
+int
+bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ int rc = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ /* If IOC is not in disabled state - return */
+ if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_IOC_FAILURE;
+ return rc;
+ }
+
+ init_completion(&bfad->enable_comp);
+ bfa_iocfc_enable(&bfad->bfa);
+ iocmd->status = BFA_STATUS_OK;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ wait_for_completion(&bfad->enable_comp);
+
+ return rc;
+}
+
+int
+bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ int rc = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (bfad->disable_active) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return -EBUSY;
+ }
+
+ bfad->disable_active = BFA_TRUE;
+ init_completion(&bfad->disable_comp);
+ bfa_iocfc_disable(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ wait_for_completion(&bfad->disable_comp);
+ bfad->disable_active = BFA_FALSE;
+ iocmd->status = BFA_STATUS_OK;
+
+ return rc;
+}
+
+static int
+bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
+{
+ int i;
+ struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
+ struct bfad_im_port_s *im_port;
+ struct bfa_port_attr_s pattr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_fcport_get_attr(&bfad->bfa, &pattr);
+ iocmd->nwwn = pattr.nwwn;
+ iocmd->pwwn = pattr.pwwn;
+ iocmd->ioc_type = bfa_get_type(&bfad->bfa);
+ iocmd->mac = bfa_get_mac(&bfad->bfa);
+ iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
+ bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
+ iocmd->factorynwwn = pattr.factorynwwn;
+ iocmd->factorypwwn = pattr.factorypwwn;
+ iocmd->bfad_num = bfad->inst_no;
+ im_port = bfad->pport.im_port;
+ iocmd->host = im_port->shost->host_no;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ strcpy(iocmd->name, bfad->adapter_name);
+ strcpy(iocmd->port_name, bfad->port_name);
+ strcpy(iocmd->hwpath, bfad->pci_name);
+
+ /* set adapter hw path */
+ strcpy(iocmd->adapter_hwpath, bfad->pci_name);
+ i = strlen(iocmd->adapter_hwpath) - 1;
+ while (iocmd->adapter_hwpath[i] != '.')
+ i--;
+ iocmd->adapter_hwpath[i] = '\0';
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+static int
+bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ /* fill in driver attr info */
+ strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
+ strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
+ BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
+ strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
+ iocmd->ioc_attr.adapter_attr.fw_ver);
+ strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
+ iocmd->ioc_attr.adapter_attr.optrom_ver);
+
+ /* copy chip rev info first otherwise it will be overwritten */
+ memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
+ sizeof(bfad->pci_attr.chip_rev));
+ memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
+ sizeof(struct bfa_ioc_pci_attr_s));
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;
+
+ bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_ioc_fwstats_s *iocmd =
+ (struct bfa_bsg_ioc_fwstats_s *)cmd;
+ void *iocmd_bufptr;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_ioc_fwstats_s),
+ sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ goto out;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+out:
+ bfa_trc(bfad, 0x6666);
+ return 0;
+}
+
+int
+bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+
+ if (v_cmd == IOCMD_IOC_RESET_STATS) {
+ bfa_ioc_clear_stats(&bfad->bfa);
+ iocmd->status = BFA_STATUS_OK;
+ } else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ }
+
+ return 0;
+}
+
+int
+bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;
+
+ if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
+ strcpy(bfad->adapter_name, iocmd->name);
+ else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
+ strcpy(bfad->port_name, iocmd->name);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;
+
+ iocmd->status = BFA_STATUS_OK;
+ bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);
+
+ return 0;
+}
+
+int
+bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ return 0;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ return 0;
+}
+
+int
+bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ return 0;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ return 0;
+}
+
+static int
+bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
+ struct bfa_lport_attr_s port_attr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
+ bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
+ iocmd->attr.pid = port_attr.pid;
+ else
+ iocmd->attr.pid = 0;
+
+ iocmd->attr.port_type = port_attr.port_type;
+ iocmd->attr.loopback = port_attr.loopback;
+ iocmd->attr.authfail = port_attr.authfail;
+ strncpy(iocmd->attr.port_symname.symname,
+ port_attr.port_cfg.sym_name.symname,
+ sizeof(port_attr.port_cfg.sym_name.symname));
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ void *iocmd_bufptr;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_port_stats_s),
+ sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
+ iocmd_bufptr, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ return 0;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ return 0;
+}
+
+int
+bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (v_cmd == IOCMD_PORT_CFG_TOPO)
+ cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
+ else if (v_cmd == IOCMD_PORT_CFG_SPEED)
+ cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
+ else if (v_cmd == IOCMD_PORT_CFG_ALPA)
+ cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
+ else if (v_cmd == IOCMD_PORT_CLR_ALPA)
+ cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
+ (struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_port_cfg_bbsc(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
+ if (v_cmd == IOCMD_PORT_BBSC_ENABLE)
+ fcport->cfg.bb_scn_state = BFA_TRUE;
+ else if (v_cmd == IOCMD_PORT_BBSC_DISABLE)
+ fcport->cfg.bb_scn_state = BFA_FALSE;
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+static int
+bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_bsg_lport_stats_s *iocmd =
+ (struct bfa_bsg_lport_stats_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_bsg_reset_stats_s *iocmd =
+ (struct bfa_bsg_reset_stats_s *)cmd;
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+ struct list_head *qe, *qen;
+ struct bfa_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->vpwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ bfa_fcs_lport_clear_stats(fcs_port);
+ /* clear IO stats from all active itnims */
+ list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+ itnim = (struct bfa_itnim_s *) qe;
+ if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
+ continue;
+ bfa_itnim_clear_stats(itnim);
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_bsg_lport_iostats_s *iocmd =
+ (struct bfa_bsg_lport_iostats_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
+ fcs_port->lp_tag);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_lport_get_rports_s *iocmd =
+ (struct bfa_bsg_lport_get_rports_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ unsigned long flags;
+ void *iocmd_bufptr;
+
+ if (iocmd->nrports == 0)
+ return -EINVAL;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_lport_get_rports_s),
+ sizeof(wwn_t) * iocmd->nrports) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd +
+ sizeof(struct bfa_bsg_lport_get_rports_s);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, 0);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ bfa_fcs_lport_get_rports(fcs_port, (wwn_t *)iocmd_bufptr,
+ &iocmd->nrports);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_rport_s *fcs_rport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+ if (fcs_rport == NULL) {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ goto out;
+ }
+
+ bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+static int
+bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_rport_scsi_addr_s *iocmd =
+ (struct bfa_bsg_rport_scsi_addr_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_itnim_s *fcs_itnim;
+ struct bfad_itnim_s *drv_itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+ if (fcs_itnim == NULL) {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ goto out;
+ }
+
+ drv_itnim = fcs_itnim->itnim_drv;
+
+ if (drv_itnim && drv_itnim->im_port)
+ iocmd->host = drv_itnim->im_port->shost->host_no;
+ else {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ goto out;
+ }
+
+ iocmd->target = drv_itnim->scsi_tgt_id;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->bus = 0;
+ iocmd->lun = 0;
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_rport_stats_s *iocmd =
+ (struct bfa_bsg_rport_stats_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_rport_s *fcs_rport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+ if (fcs_rport == NULL) {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ goto out;
+ }
+
+ memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
+ sizeof(struct bfa_rport_stats_s));
+ memcpy((void *)&iocmd->stats.hal_stats,
+ (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
+ sizeof(struct bfa_rport_hal_stats_s));
+
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_rport_reset_stats_s *iocmd =
+ (struct bfa_bsg_rport_reset_stats_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_rport_s *fcs_rport;
+ struct bfa_rport_s *rport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+ if (fcs_rport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ goto out;
+ }
+
+ memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
+ rport = bfa_fcs_rport_get_halrport(fcs_rport);
+ memset(&rport->stats, 0, sizeof(rport->stats));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_rport_set_speed_s *iocmd =
+ (struct bfa_bsg_rport_set_speed_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_rport_s *fcs_rport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (fcs_port == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ goto out;
+ }
+
+ fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
+ if (fcs_rport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ goto out;
+ }
+
+ fcs_rport->rpf.assigned_speed = iocmd->speed;
+ /* Set this speed in f/w only if the RPSC speed is not available */
+ if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
+ bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->vpwwn);
+ if (fcs_vport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
+ goto out;
+ }
+
+ bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct bfa_bsg_vport_stats_s *iocmd =
+ (struct bfa_bsg_vport_stats_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->vpwwn);
+ if (fcs_vport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
+ goto out;
+ }
+
+ memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
+ sizeof(struct bfa_vport_stats_s));
+ memcpy((void *)&iocmd->vport_stats.port_stats,
+ (void *)&fcs_vport->lport.stats,
+ sizeof(struct bfa_lport_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_fcs_vport_s *fcs_vport;
+ struct bfa_bsg_reset_stats_s *iocmd =
+ (struct bfa_bsg_reset_stats_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->vpwwn);
+ if (fcs_vport == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
+ goto out;
+ }
+
+ memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
+ memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+static int
+bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_fabric_get_lports_s *iocmd =
+ (struct bfa_bsg_fabric_get_lports_s *)cmd;
+ bfa_fcs_vf_t *fcs_vf;
+ uint32_t nports = iocmd->nports;
+ unsigned long flags;
+ void *iocmd_bufptr;
+
+ if (nports == 0) {
+ iocmd->status = BFA_STATUS_EINVAL;
+ goto out;
+ }
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_fabric_get_lports_s),
+ sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ goto out;
+ }
+
+ iocmd_bufptr = (char *)iocmd +
+ sizeof(struct bfa_bsg_fabric_get_lports_s);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
+ if (fcs_vf == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VFID;
+ goto out;
+ }
+ bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->nports = nports;
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+ if (cmd == IOCMD_RATELIM_ENABLE)
+ fcport->cfg.ratelimit = BFA_TRUE;
+ else if (cmd == IOCMD_RATELIM_DISABLE)
+ fcport->cfg.ratelimit = BFA_FALSE;
+
+ if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
+ fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
+
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+
+ return 0;
+}
+
+int
+bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+ struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+ /* Auto and speeds greater than the supported speed, are invalid */
+ if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
+ (iocmd->speed > fcport->speed_sup)) {
+ iocmd->status = BFA_STATUS_UNSUPP_SPEED;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+ }
+
+ fcport->cfg.trl_def_speed = iocmd->speed;
+ iocmd->status = BFA_STATUS_OK;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcpim_modstats_s *iocmd =
+ (struct bfa_bsg_fcpim_modstats_s *)cmd;
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+ struct list_head *qe, *qen;
+ struct bfa_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ /* accumulate IO stats from itnim */
+ memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
+ list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+ itnim = (struct bfa_itnim_s *) qe;
+ bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
+ (struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+ struct list_head *qe, *qen;
+ struct bfa_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+ itnim = (struct bfa_itnim_s *) qe;
+ bfa_itnim_clear_stats(itnim);
+ }
+ memset(&fcpim->del_itn_stats, 0,
+ sizeof(struct bfa_fcpim_del_itn_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
+ (struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
+ struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
+ sizeof(struct bfa_fcpim_del_itn_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+static int
+bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->lpwwn);
+ if (!fcs_port)
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ else
+ iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
+ iocmd->rpwwn, &iocmd->attr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+static int
+bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_itnim_iostats_s *iocmd =
+ (struct bfa_bsg_itnim_iostats_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->lpwwn);
+ if (!fcs_port) {
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ bfa_trc(bfad, 0);
+ } else {
+ itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+ if (itnim == NULL)
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ else {
+ iocmd->status = BFA_STATUS_OK;
+ memcpy((void *)&iocmd->iostats, (void *)
+ &(bfa_fcs_itnim_get_halitn(itnim)->stats),
+ sizeof(struct bfa_itnim_iostats_s));
+ }
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+static int
+bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_rport_reset_stats_s *iocmd =
+ (struct bfa_bsg_rport_reset_stats_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->pwwn);
+ if (!fcs_port)
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ else {
+ itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+ if (itnim == NULL)
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ else {
+ iocmd->status = BFA_STATUS_OK;
+ bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
+ bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
+ }
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+static int
+bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_itnim_itnstats_s *iocmd =
+ (struct bfa_bsg_itnim_itnstats_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->lpwwn);
+ if (!fcs_port) {
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ bfa_trc(bfad, 0);
+ } else {
+ itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+ if (itnim == NULL)
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ else {
+ iocmd->status = BFA_STATUS_OK;
+ bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
+ &iocmd->itnstats);
+ }
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcport_enable(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcport_disable(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
+ &iocmd->pcifn_cfg,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
+ &iocmd->pcifn_id, iocmd->port,
+ iocmd->pcifn_class, iocmd->bandwidth,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
+ iocmd->pcifn_id,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
+ iocmd->pcifn_id, iocmd->bandwidth,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ bfa_trc(bfad, iocmd->status);
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_adapter_cfg_mode_s *iocmd =
+ (struct bfa_bsg_adapter_cfg_mode_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags = 0;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
+ iocmd->cfg.mode, iocmd->cfg.max_pf,
+ iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_port_cfg_mode_s *iocmd =
+ (struct bfa_bsg_port_cfg_mode_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags = 0;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
+ iocmd->instance, iocmd->cfg.mode,
+ iocmd->cfg.max_pf, iocmd->cfg.max_vf,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
+ iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
+ bfad_hcb_comp, &fcomp);
+ else
+ iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_faa_enable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+ struct bfad_hal_comp fcomp;
+
+ init_completion(&fcomp.comp);
+ iocmd->status = BFA_STATUS_OK;
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_faa_enable(&bfad->bfa, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_faa_disable(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+ struct bfad_hal_comp fcomp;
+
+ init_completion(&fcomp.comp);
+ iocmd->status = BFA_STATUS_OK;
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_faa_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ iocmd->status = BFA_STATUS_OK;
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
+{
+ struct bfa_bsg_cee_attr_s *iocmd =
+ (struct bfa_bsg_cee_attr_s *)cmd;
+ void *iocmd_bufptr;
+ struct bfad_hal_comp cee_comp;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_cee_attr_s),
+ sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);
+
+ cee_comp.status = 0;
+ init_completion(&cee_comp.comp);
+ mutex_lock(&bfad_mutex);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
+ bfad_hcb_comp, &cee_comp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ mutex_unlock(&bfad_mutex);
+ bfa_trc(bfad, 0x5555);
+ goto out;
+ }
+ wait_for_completion(&cee_comp.comp);
+ mutex_unlock(&bfad_mutex);
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_cee_stats_s *iocmd =
+ (struct bfa_bsg_cee_stats_s *)cmd;
+ void *iocmd_bufptr;
+ struct bfad_hal_comp cee_comp;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_cee_stats_s),
+ sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);
+
+ cee_comp.status = 0;
+ init_completion(&cee_comp.comp);
+ mutex_lock(&bfad_mutex);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
+ bfad_hcb_comp, &cee_comp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ mutex_unlock(&bfad_mutex);
+ bfa_trc(bfad, 0x5555);
+ goto out;
+ }
+ wait_for_completion(&cee_comp.comp);
+ mutex_unlock(&bfad_mutex);
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ bfa_trc(bfad, 0x5555);
+ return 0;
+}
+
+int
+bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
+ goto out;
+
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_flash_attr_s *iocmd =
+ (struct bfa_bsg_flash_attr_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
+ iocmd->instance, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
+ void *iocmd_bufptr;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_flash_s),
+ iocmd->bufsz) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
+ iocmd->type, iocmd->instance, iocmd_bufptr,
+ iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ void *iocmd_bufptr;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_flash_s),
+ iocmd->bufsz) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
+ iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_get_temp_s *iocmd =
+ (struct bfa_bsg_diag_get_temp_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
+ &iocmd->result, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_memtest_s *iocmd =
+ (struct bfa_bsg_diag_memtest_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
+ &iocmd->memtest, iocmd->pat,
+ &iocmd->result, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_loopback_s *iocmd =
+ (struct bfa_bsg_diag_loopback_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
+ iocmd->speed, iocmd->lpcnt, iocmd->pat,
+ &iocmd->result, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_fwping_s *iocmd =
+ (struct bfa_bsg_diag_fwping_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
+ iocmd->pattern, &iocmd->result,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ bfa_trc(bfad, 0x77771);
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
+ iocmd->queue, &iocmd->result,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_sfp_show_s *iocmd =
+ (struct bfa_bsg_sfp_show_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ bfa_trc(bfad, iocmd->status);
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
+ &iocmd->ledtest);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_beacon_s *iocmd =
+ (struct bfa_bsg_diag_beacon_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
+ iocmd->beacon, iocmd->link_e2e_beacon,
+ iocmd->second);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_diag_lb_stat_s *iocmd =
+ (struct bfa_bsg_diag_lb_stat_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ bfa_trc(bfad, iocmd->status);
+
+ return 0;
+}
+
+int
+bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_phy_attr_s *iocmd =
+ (struct bfa_bsg_phy_attr_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
+ &iocmd->attr, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_phy_stats_s *iocmd =
+ (struct bfa_bsg_phy_stats_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
+ &iocmd->stats, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
+{
+ struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ void *iocmd_bufptr;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_phy_s),
+ iocmd->bufsz) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
+ iocmd->instance, iocmd_bufptr, iocmd->bufsz,
+ 0, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_vhba_attr_s *iocmd =
+ (struct bfa_bsg_vhba_attr_s *)cmd;
+ struct bfa_vhba_attr_s *attr = &iocmd->attr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ attr->pwwn = bfad->bfa.ioc.attr->pwwn;
+ attr->nwwn = bfad->bfa.ioc.attr->nwwn;
+ attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
+ attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
+ attr->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
+ iocmd->status = BFA_STATUS_OK;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
+{
+ struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
+ void *iocmd_bufptr;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len,
+ sizeof(struct bfa_bsg_phy_s),
+ iocmd->bufsz) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
+ iocmd->instance, iocmd_bufptr, iocmd->bufsz,
+ 0, bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
+ void *iocmd_bufptr;
+
+ if (iocmd->bufsz < sizeof(struct bfa_plog_s)) {
+ bfa_trc(bfad, sizeof(struct bfa_plog_s));
+ iocmd->status = BFA_STATUS_EINVAL;
+ goto out;
+ }
+
+ iocmd->status = BFA_STATUS_OK;
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
+ memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s));
+out:
+ return 0;
+}
+
+#define BFA_DEBUG_FW_CORE_CHUNK_SZ 0x4000U /* 16K chunks for FW dump */
+int
+bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
+ unsigned int payload_len)
+{
+ struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
+ void *iocmd_bufptr;
+ unsigned long flags;
+
+ if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
+ BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
+ iocmd->status = BFA_STATUS_VERSION_FAIL;
+ return 0;
+ }
+
+ if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
+ !IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
+ !IS_ALIGNED(iocmd->offset, sizeof(u32))) {
+ bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
+ iocmd->status = BFA_STATUS_EINVAL;
+ goto out;
+ }
+
+ iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
+ (u32 *)&iocmd->offset, &iocmd->bufsz);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ unsigned long flags;
+
+ if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ } else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
+ bfad->plog_buf.head = bfad->plog_buf.tail = 0;
+ else if (v_cmd == IOCMD_DEBUG_START_DTRC)
+ bfa_trc_init(bfad->trcmod);
+ else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
+ bfa_trc_stop(bfad->trcmod);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;
+
+ if (iocmd->ctl == BFA_TRUE)
+ bfad->plog_buf.plog_enabled = 1;
+ else
+ bfad->plog_buf.plog_enabled = 0;
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_fcpim_profile_s *iocmd =
+ (struct bfa_bsg_fcpim_profile_s *)cmd;
+ struct timeval tv;
+ unsigned long flags;
+
+ do_gettimeofday(&tv);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
+ iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
+ else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
+ iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+static int
+bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_itnim_ioprofile_s *iocmd =
+ (struct bfa_bsg_itnim_ioprofile_s *)cmd;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_itnim_s *itnim;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
+ iocmd->vf_id, iocmd->lpwwn);
+ if (!fcs_port)
+ iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
+ else {
+ itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
+ if (itnim == NULL)
+ iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
+ else
+ iocmd->status = bfa_itnim_get_ioprofile(
+ bfa_fcs_itnim_get_halitn(itnim),
+ &iocmd->ioprofile);
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
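+/*
+ * FC port statistics are collected asynchronously: a pending-queue element
+ * is queued with bfad_hcb_comp() as the callback and &iocmd->stats as the
+ * destination, and the handler sleeps on fcomp until the request completes.
+ */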
+int
+bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcport_stats_s *iocmd =
+ (struct bfa_bsg_fcport_stats_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+ struct bfa_cb_pending_q_s cb_qe;
+
+ init_completion(&fcomp.comp);
+ bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+ &fcomp, &iocmd->stats);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+ struct bfa_cb_pending_q_s cb_qe;
+
+ init_completion(&fcomp.comp);
+ bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
+ BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
+ &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
+ BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
+ &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
+ struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
+ struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
+ pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
+ pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
+ memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
+ iocmd->status = BFA_STATUS_OK;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return 0;
+}
+
+int
+bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
+ BFA_FLASH_PART_PXECFG,
+ bfad->bfa.ioc.port_id, &iocmd->cfg,
+ sizeof(struct bfa_ethboot_cfg_s), 0,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
+ BFA_FLASH_PART_PXECFG,
+ bfad->bfa.ioc.port_id, &iocmd->cfg,
+ sizeof(struct bfa_ethboot_cfg_s), 0,
+ bfad_hcb_comp, &fcomp);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK)
+ goto out;
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+ if (v_cmd == IOCMD_TRUNK_ENABLE) {
+ trunk->attr.state = BFA_TRUNK_OFFLINE;
+ bfa_fcport_disable(&bfad->bfa);
+ fcport->cfg.trunked = BFA_TRUE;
+ } else if (v_cmd == IOCMD_TRUNK_DISABLE) {
+ trunk->attr.state = BFA_TRUNK_DISABLED;
+ bfa_fcport_disable(&bfad->bfa);
+ fcport->cfg.trunked = BFA_FALSE;
+ }
+
+ if (!bfa_fcport_is_disabled(&bfad->bfa))
+ bfa_fcport_enable(&bfad->bfa);
+
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
+ sizeof(struct bfa_trunk_attr_s));
+ iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
+ if (v_cmd == IOCMD_QOS_ENABLE)
+ fcport->cfg.qos_enabled = BFA_TRUE;
+ else if (v_cmd == IOCMD_QOS_DISABLE)
+ fcport->cfg.qos_enabled = BFA_FALSE;
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->attr.state = fcport->qos_attr.state;
+ iocmd->attr.total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_qos_vc_attr_s *iocmd =
+ (struct bfa_bsg_qos_vc_attr_s *)cmd;
+ struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
+ struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
+ unsigned long flags;
+ u32 i = 0;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
+ iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
+ iocmd->attr.elp_opmode_flags =
+ be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
+
+ /* Individual VC info */
+ while (i < iocmd->attr.total_vc_count) {
+ iocmd->attr.vc_info[i].vc_credit =
+ bfa_vc_attr->vc_info[i].vc_credit;
+ iocmd->attr.vc_info[i].borrow_credit =
+ bfa_vc_attr->vc_info[i].borrow_credit;
+ iocmd->attr.vc_info[i].priority =
+ bfa_vc_attr->vc_info[i].priority;
+ i++;
+ }
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ iocmd->status = BFA_STATUS_OK;
+ return 0;
+}
+
+int
+bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcport_stats_s *iocmd =
+ (struct bfa_bsg_fcport_stats_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+ struct bfa_cb_pending_q_s cb_qe;
+
+ init_completion(&fcomp.comp);
+ bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+ &fcomp, &iocmd->stats);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
+ iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
+ struct bfad_hal_comp fcomp;
+ unsigned long flags;
+ struct bfa_cb_pending_q_s cb_qe;
+
+ init_completion(&fcomp.comp);
+ bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
+ &fcomp, NULL);
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
+ iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ if (iocmd->status != BFA_STATUS_OK) {
+ bfa_trc(bfad, iocmd->status);
+ goto out;
+ }
+ wait_for_completion(&fcomp.comp);
+ iocmd->status = fcomp.status;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_vf_stats_s *iocmd =
+ (struct bfa_bsg_vf_stats_s *)cmd;
+ struct bfa_fcs_fabric_s *fcs_vf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
+ if (fcs_vf == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VFID;
+ goto out;
+ }
+ memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
+ sizeof(struct bfa_vf_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_vf_reset_stats_s *iocmd =
+ (struct bfa_bsg_vf_reset_stats_s *)cmd;
+ struct bfa_fcs_fabric_s *fcs_vf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
+ if (fcs_vf == NULL) {
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_UNKNOWN_VFID;
+ goto out;
+ }
+ memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ iocmd->status = BFA_STATUS_OK;
+out:
+ return 0;
+}
+
+int
+bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE)
+ iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
+ else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE)
+ iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
+ else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
+ iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
+{
+ struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
+ (struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
+ struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
+int
+bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
+{
+ struct bfa_bsg_fcpim_lunmask_s *iocmd =
+ (struct bfa_bsg_fcpim_lunmask_s *)cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
+ iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
+ &iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
+ else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
+ iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
+ iocmd->vf_id, &iocmd->pwwn,
+ iocmd->rpwwn, iocmd->lun);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return 0;
+}
+
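+/*
+ * Dispatch a vendor-unique bsg command to its IOCMD_* handler. The iocmd
+ * buffer carries the command header followed by any additional payload;
+ * each handler records its result in iocmd->status.
+ */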
+static int
+bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
+ unsigned int payload_len)
+{
+ int rc = -EINVAL;
+
+ switch (cmd) {
+ case IOCMD_IOC_ENABLE:
+ rc = bfad_iocmd_ioc_enable(bfad, iocmd);
+ break;
+ case IOCMD_IOC_DISABLE:
+ rc = bfad_iocmd_ioc_disable(bfad, iocmd);
+ break;
+ case IOCMD_IOC_GET_INFO:
+ rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
+ break;
+ case IOCMD_IOC_GET_ATTR:
+ rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_IOC_GET_STATS:
+ rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_IOC_GET_FWSTATS:
+ rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_IOC_RESET_STATS:
+ case IOCMD_IOC_RESET_FWSTATS:
+ rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
+ break;
+ case IOCMD_IOC_SET_ADAPTER_NAME:
+ case IOCMD_IOC_SET_PORT_NAME:
+ rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
+ break;
+ case IOCMD_IOCFC_GET_ATTR:
+ rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_IOCFC_SET_INTR:
+ rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
+ break;
+ case IOCMD_PORT_ENABLE:
+ rc = bfad_iocmd_port_enable(bfad, iocmd);
+ break;
+ case IOCMD_PORT_DISABLE:
+ rc = bfad_iocmd_port_disable(bfad, iocmd);
+ break;
+ case IOCMD_PORT_GET_ATTR:
+ rc = bfad_iocmd_port_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_PORT_GET_STATS:
+ rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_PORT_RESET_STATS:
+ rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
+ break;
+ case IOCMD_PORT_CFG_TOPO:
+ case IOCMD_PORT_CFG_SPEED:
+ case IOCMD_PORT_CFG_ALPA:
+ case IOCMD_PORT_CLR_ALPA:
+ rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
+ break;
+ case IOCMD_PORT_CFG_MAXFRSZ:
+ rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
+ break;
+ case IOCMD_PORT_BBSC_ENABLE:
+ case IOCMD_PORT_BBSC_DISABLE:
+ rc = bfad_iocmd_port_cfg_bbsc(bfad, iocmd, cmd);
+ break;
+ case IOCMD_LPORT_GET_ATTR:
+ rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_LPORT_GET_STATS:
+ rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_LPORT_RESET_STATS:
+ rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
+ break;
+ case IOCMD_LPORT_GET_IOSTATS:
+ rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
+ break;
+ case IOCMD_LPORT_GET_RPORTS:
+ rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_RPORT_GET_ATTR:
+ rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_RPORT_GET_ADDR:
+ rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
+ break;
+ case IOCMD_RPORT_GET_STATS:
+ rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_RPORT_RESET_STATS:
+ rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
+ break;
+ case IOCMD_RPORT_SET_SPEED:
+ rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
+ break;
+ case IOCMD_VPORT_GET_ATTR:
+ rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_VPORT_GET_STATS:
+ rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_VPORT_RESET_STATS:
+ rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
+ break;
+ case IOCMD_FABRIC_GET_LPORTS:
+ rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_RATELIM_ENABLE:
+ case IOCMD_RATELIM_DISABLE:
+ rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
+ break;
+ case IOCMD_RATELIM_DEF_SPEED:
+ rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
+ break;
+ case IOCMD_FCPIM_FAILOVER:
+ rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_MODSTATS:
+ rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_MODSTATSCLR:
+ rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_DEL_ITN_STATS:
+ rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
+ break;
+ case IOCMD_ITNIM_GET_ATTR:
+ rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_ITNIM_GET_IOSTATS:
+ rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
+ break;
+ case IOCMD_ITNIM_RESET_STATS:
+ rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
+ break;
+ case IOCMD_ITNIM_GET_ITNSTATS:
+ rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
+ break;
+ case IOCMD_FCPORT_ENABLE:
+ rc = bfad_iocmd_fcport_enable(bfad, iocmd);
+ break;
+ case IOCMD_FCPORT_DISABLE:
+ rc = bfad_iocmd_fcport_disable(bfad, iocmd);
+ break;
+ case IOCMD_IOC_PCIFN_CFG:
+ rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
+ break;
+ case IOCMD_PCIFN_CREATE:
+ rc = bfad_iocmd_pcifn_create(bfad, iocmd);
+ break;
+ case IOCMD_PCIFN_DELETE:
+ rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
+ break;
+ case IOCMD_PCIFN_BW:
+ rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
+ break;
+ case IOCMD_ADAPTER_CFG_MODE:
+ rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
+ break;
+ case IOCMD_PORT_CFG_MODE:
+ rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
+ break;
+ case IOCMD_FLASH_ENABLE_OPTROM:
+ case IOCMD_FLASH_DISABLE_OPTROM:
+ rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
+ break;
+ case IOCMD_FAA_ENABLE:
+ rc = bfad_iocmd_faa_enable(bfad, iocmd);
+ break;
+ case IOCMD_FAA_DISABLE:
+ rc = bfad_iocmd_faa_disable(bfad, iocmd);
+ break;
+ case IOCMD_FAA_QUERY:
+ rc = bfad_iocmd_faa_query(bfad, iocmd);
+ break;
+ case IOCMD_CEE_GET_ATTR:
+ rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_CEE_GET_STATS:
+ rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_CEE_RESET_STATS:
+ rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
+ break;
+ case IOCMD_SFP_MEDIA:
+ rc = bfad_iocmd_sfp_media(bfad, iocmd);
+ break;
+ case IOCMD_SFP_SPEED:
+ rc = bfad_iocmd_sfp_speed(bfad, iocmd);
+ break;
+ case IOCMD_FLASH_GET_ATTR:
+ rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_FLASH_ERASE_PART:
+ rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
+ break;
+ case IOCMD_FLASH_UPDATE_PART:
+ rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_FLASH_READ_PART:
+ rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_DIAG_TEMP:
+ rc = bfad_iocmd_diag_temp(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_MEMTEST:
+ rc = bfad_iocmd_diag_memtest(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_LOOPBACK:
+ rc = bfad_iocmd_diag_loopback(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_FWPING:
+ rc = bfad_iocmd_diag_fwping(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_QUEUETEST:
+ rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_SFP:
+ rc = bfad_iocmd_diag_sfp(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_LED:
+ rc = bfad_iocmd_diag_led(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_BEACON_LPORT:
+ rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
+ break;
+ case IOCMD_DIAG_LB_STAT:
+ rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
+ break;
+ case IOCMD_PHY_GET_ATTR:
+ rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_PHY_GET_STATS:
+ rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_PHY_UPDATE_FW:
+ rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_PHY_READ_FW:
+ rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_VHBA_QUERY:
+ rc = bfad_iocmd_vhba_query(bfad, iocmd);
+ break;
+ case IOCMD_DEBUG_PORTLOG:
+ rc = bfad_iocmd_porglog_get(bfad, iocmd);
+ break;
+ case IOCMD_DEBUG_FW_CORE:
+ rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
+ break;
+ case IOCMD_DEBUG_FW_STATE_CLR:
+ case IOCMD_DEBUG_PORTLOG_CLR:
+ case IOCMD_DEBUG_START_DTRC:
+ case IOCMD_DEBUG_STOP_DTRC:
+ rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
+ break;
+ case IOCMD_DEBUG_PORTLOG_CTL:
+ rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_PROFILE_ON:
+ case IOCMD_FCPIM_PROFILE_OFF:
+ rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
+ break;
+ case IOCMD_ITNIM_GET_IOPROFILE:
+ rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
+ break;
+ case IOCMD_FCPORT_GET_STATS:
+ rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_FCPORT_RESET_STATS:
+ rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
+ break;
+ case IOCMD_BOOT_CFG:
+ rc = bfad_iocmd_boot_cfg(bfad, iocmd);
+ break;
+ case IOCMD_BOOT_QUERY:
+ rc = bfad_iocmd_boot_query(bfad, iocmd);
+ break;
+ case IOCMD_PREBOOT_QUERY:
+ rc = bfad_iocmd_preboot_query(bfad, iocmd);
+ break;
+ case IOCMD_ETHBOOT_CFG:
+ rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
+ break;
+ case IOCMD_ETHBOOT_QUERY:
+ rc = bfad_iocmd_ethboot_query(bfad, iocmd);
+ break;
+ case IOCMD_TRUNK_ENABLE:
+ case IOCMD_TRUNK_DISABLE:
+ rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
+ break;
+ case IOCMD_TRUNK_GET_ATTR:
+ rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_QOS_ENABLE:
+ case IOCMD_QOS_DISABLE:
+ rc = bfad_iocmd_qos(bfad, iocmd, cmd);
+ break;
+ case IOCMD_QOS_GET_ATTR:
+ rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
+ break;
+ case IOCMD_QOS_GET_VC_ATTR:
+ rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
+ break;
+ case IOCMD_QOS_GET_STATS:
+ rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_QOS_RESET_STATS:
+ rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
+ break;
+ case IOCMD_VF_GET_STATS:
+ rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
+ break;
+ case IOCMD_VF_RESET_STATS:
+ rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_LUNMASK_ENABLE:
+ case IOCMD_FCPIM_LUNMASK_DISABLE:
+ case IOCMD_FCPIM_LUNMASK_CLEAR:
+ rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
+ break;
+ case IOCMD_FCPIM_LUNMASK_QUERY:
+ rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
+ break;
+ case IOCMD_FCPIM_LUNMASK_ADD:
+ case IOCMD_FCPIM_LUNMASK_DELETE:
+ rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ return rc;
+}
+
+static int
+bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
+{
+ uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) job->shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ void *payload_kbuf;
+ int rc = -EINVAL;
+
+ /* Allocate a temp buffer to hold the passed in user space command */
+ payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
+ if (!payload_kbuf) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+	/* Copy the passed-in sg_list to a linear buffer holding the cmnd data */
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt, payload_kbuf,
+ job->request_payload.payload_len);
+
+ /* Invoke IOCMD handler - to handle all the vendor command requests */
+ rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
+ job->request_payload.payload_len);
+ if (rc != BFA_STATUS_OK)
+ goto error;
+
+ /* Copy the response data to the job->reply_payload sg_list */
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ payload_kbuf,
+ job->reply_payload.payload_len);
+
+ /* free the command buffer */
+ kfree(payload_kbuf);
+
+ /* Fill the BSG job reply data */
+ job->reply_len = job->reply_payload.payload_len;
+ job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
+ job->reply->result = rc;
+
+ job->job_done(job);
+ return rc;
+error:
+ /* free the command buffer */
+ kfree(payload_kbuf);
+out:
+ job->reply->result = rc;
+ job->reply_len = sizeof(uint32_t);
+ job->reply->reply_payload_rcv_len = 0;
+ return rc;
+}
+
+/* FC passthru callbacks */
+u64
+bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
+{
+ struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+ struct bfa_sge_s *sge;
+ u64 addr;
+
+ sge = drv_fcxp->req_sge + sgeid;
+ addr = (u64)(size_t) sge->sg_addr;
+ return addr;
+}
+
+u32
+bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
+{
+ struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+ struct bfa_sge_s *sge;
+
+ sge = drv_fcxp->req_sge + sgeid;
+ return sge->sg_len;
+}
+
+u64
+bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
+{
+ struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+ struct bfa_sge_s *sge;
+ u64 addr;
+
+ sge = drv_fcxp->rsp_sge + sgeid;
+ addr = (u64)(size_t) sge->sg_addr;
+ return addr;
+}
+
+u32
+bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
+{
+ struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+ struct bfa_sge_s *sge;
+
+ sge = drv_fcxp->rsp_sge + sgeid;
+ return sge->sg_len;
+}
+
+void
+bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
+ bfa_status_t req_status, u32 rsp_len, u32 resid_len,
+ struct fchs_s *rsp_fchs)
+{
+ struct bfad_fcxp *drv_fcxp = bfad_fcxp;
+
+ drv_fcxp->req_status = req_status;
+ drv_fcxp->rsp_len = rsp_len;
+
+ /* bfa_fcxp will be automatically freed by BFA */
+ drv_fcxp->bfa_fcxp = NULL;
+ complete(&drv_fcxp->comp);
+}
+
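+/*
+ * Copy the linear bsg payload into a single DMA-coherent buffer and build a
+ * one-entry SG table for it. Returns the bfad_buf_info array, which is
+ * followed in memory by the SG table, or NULL on allocation failure.
+ */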
+struct bfad_buf_info *
+bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
+ uint32_t payload_len, uint32_t *num_sgles)
+{
+ struct bfad_buf_info *buf_base, *buf_info;
+ struct bfa_sge_s *sg_table;
+ int sge_num = 1;
+
+ buf_base = kzalloc((sizeof(struct bfad_buf_info) +
+ sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
+ if (!buf_base)
+ return NULL;
+
+ sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
+ (sizeof(struct bfad_buf_info) * sge_num));
+
+ /* Allocate dma coherent memory */
+ buf_info = buf_base;
+ buf_info->size = payload_len;
+ buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size,
+ &buf_info->phys, GFP_KERNEL);
+ if (!buf_info->virt)
+ goto out_free_mem;
+
+ /* copy the linear bsg buffer to buf_info */
+ memset(buf_info->virt, 0, buf_info->size);
+ memcpy(buf_info->virt, payload_kbuf, buf_info->size);
+
+ /*
+ * Setup SG table
+ */
+ sg_table->sg_len = buf_info->size;
+ sg_table->sg_addr = (void *)(size_t) buf_info->phys;
+
+ *num_sgles = sge_num;
+
+ return buf_base;
+
+out_free_mem:
+ kfree(buf_base);
+ return NULL;
+}
+
+void
+bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
+ uint32_t num_sgles)
+{
+ int i;
+ struct bfad_buf_info *buf_info = buf_base;
+
+ if (buf_base) {
+ for (i = 0; i < num_sgles; buf_info++, i++) {
+ if (buf_info->virt != NULL)
+ dma_free_coherent(&bfad->pcidev->dev,
+ buf_info->size, buf_info->virt,
+ buf_info->phys);
+ }
+ kfree(buf_base);
+ }
+}
+
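+/*
+ * Allocate a hal fcxp and send the FC passthru frame described by bsg_fcpt;
+ * bfad_send_fcpt_cb() records the request status and completes
+ * drv_fcxp->comp when the exchange finishes.
+ */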
+int
+bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
+ bfa_bsg_fcpt_t *bsg_fcpt)
+{
+ struct bfa_fcxp_s *hal_fcxp;
+ struct bfad_s *bfad = drv_fcxp->port->bfad;
+ unsigned long flags;
+ uint8_t lp_tag;
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+
+ /* Allocate bfa_fcxp structure */
+ hal_fcxp = bfa_fcxp_alloc(drv_fcxp, &bfad->bfa,
+ drv_fcxp->num_req_sgles,
+ drv_fcxp->num_rsp_sgles,
+ bfad_fcxp_get_req_sgaddr_cb,
+ bfad_fcxp_get_req_sglen_cb,
+ bfad_fcxp_get_rsp_sgaddr_cb,
+ bfad_fcxp_get_rsp_sglen_cb);
+ if (!hal_fcxp) {
+ bfa_trc(bfad, 0);
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ return BFA_STATUS_ENOMEM;
+ }
+
+ drv_fcxp->bfa_fcxp = hal_fcxp;
+
+ lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);
+
+ bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
+ bsg_fcpt->cts, bsg_fcpt->cos,
+ job->request_payload.payload_len,
+ &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
+ job->reply_payload.payload_len, bsg_fcpt->tsecs);
+
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ return BFA_STATUS_OK;
+}
+
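+/*
+ * Handle an ELS/CT passthru bsg request: copy the bfa_bsg_fcpt_s descriptor
+ * from user space, look up the local port (and the remote port for RPT
+ * commands), map the request/response payloads for DMA, send the frame and
+ * copy the results back to the bsg reply and the user descriptor.
+ */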
+int
+bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
+{
+ struct bfa_bsg_data *bsg_data;
+ struct bfad_im_port_s *im_port =
+ (struct bfad_im_port_s *) job->shost->hostdata[0];
+ struct bfad_s *bfad = im_port->bfad;
+ bfa_bsg_fcpt_t *bsg_fcpt;
+ struct bfad_fcxp *drv_fcxp;
+ struct bfa_fcs_lport_s *fcs_port;
+ struct bfa_fcs_rport_s *fcs_rport;
+ uint32_t command_type = job->request->msgcode;
+ unsigned long flags;
+ struct bfad_buf_info *rsp_buf_info;
+ void *req_kbuf = NULL, *rsp_kbuf = NULL;
+ int rc = -EINVAL;
+
+	job->reply_len = sizeof(uint32_t);	/* At least uint32_t reply_len */
+ job->reply->reply_payload_rcv_len = 0;
+
+ /* Get the payload passed in from userspace */
+ bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
+ sizeof(struct fc_bsg_request));
+ if (bsg_data == NULL)
+ goto out;
+
+	/*
+	 * Allocate a buffer for bsg_fcpt and copy in the user-space payload
+	 * of size bsg_data->payload_len
+	 */
+ bsg_fcpt = (struct bfa_bsg_fcpt_s *)
+ kzalloc(bsg_data->payload_len, GFP_KERNEL);
+ if (!bsg_fcpt)
+ goto out;
+
+ if (copy_from_user((uint8_t *)bsg_fcpt, bsg_data->payload,
+ bsg_data->payload_len)) {
+ kfree(bsg_fcpt);
+ goto out;
+ }
+
+ drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
+ if (drv_fcxp == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ spin_lock_irqsave(&bfad->bfad_lock, flags);
+ fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
+ bsg_fcpt->lpwwn);
+ if (fcs_port == NULL) {
+ bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ goto out_free_mem;
+ }
+
+ /* Check if the port is online before sending FC Passthru cmd */
+ if (!bfa_fcs_lport_is_online(fcs_port)) {
+ bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ goto out_free_mem;
+ }
+
+ drv_fcxp->port = fcs_port->bfad_port;
+
+	if (drv_fcxp->port->bfad == NULL)
+ drv_fcxp->port->bfad = bfad;
+
+ /* Fetch the bfa_rport - if nexus needed */
+ if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
+ command_type == FC_BSG_HST_CT) {
+ /* BSG HST commands: no nexus needed */
+ drv_fcxp->bfa_rport = NULL;
+
+ } else if (command_type == FC_BSG_RPT_ELS ||
+ command_type == FC_BSG_RPT_CT) {
+ /* BSG RPT commands: nexus needed */
+ fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
+ bsg_fcpt->dpwwn);
+ if (fcs_rport == NULL) {
+ bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ goto out_free_mem;
+ }
+
+ drv_fcxp->bfa_rport = fcs_rport->bfa_rport;
+
+ } else { /* Unknown BSG msgcode; return -EINVAL */
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+ goto out_free_mem;
+ }
+
+ spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+ /* allocate memory for req / rsp buffers */
+ req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
+ if (!req_kbuf) {
+ printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
+ bfad->pci_name);
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+
+ rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
+ if (!rsp_kbuf) {
+ printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
+ bfad->pci_name);
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+
+ /* map req sg - copy the sg_list passed in to the linear buffer */
+ sg_copy_to_buffer(job->request_payload.sg_list,
+ job->request_payload.sg_cnt, req_kbuf,
+ job->request_payload.payload_len);
+
+ drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
+ job->request_payload.payload_len,
+ &drv_fcxp->num_req_sgles);
+ if (!drv_fcxp->reqbuf_info) {
+ printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
+ bfad->pci_name);
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+
+ drv_fcxp->req_sge = (struct bfa_sge_s *)
+ (((uint8_t *)drv_fcxp->reqbuf_info) +
+ (sizeof(struct bfad_buf_info) *
+ drv_fcxp->num_req_sgles));
+
+ /* map rsp sg */
+ drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
+ job->reply_payload.payload_len,
+ &drv_fcxp->num_rsp_sgles);
+ if (!drv_fcxp->rspbuf_info) {
+ printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
+ bfad->pci_name);
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+
+ rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
+ drv_fcxp->rsp_sge = (struct bfa_sge_s *)
+ (((uint8_t *)drv_fcxp->rspbuf_info) +
+ (sizeof(struct bfad_buf_info) *
+ drv_fcxp->num_rsp_sgles));
+
+ /* fcxp send */
+ init_completion(&drv_fcxp->comp);
+ rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
+ if (rc == BFA_STATUS_OK) {
+ wait_for_completion(&drv_fcxp->comp);
+ bsg_fcpt->status = drv_fcxp->req_status;
+ } else {
+ bsg_fcpt->status = rc;
+ goto out_free_mem;
+ }
+
+ /* fill the job->reply data */
+ if (drv_fcxp->req_status == BFA_STATUS_OK) {
+ job->reply_len = drv_fcxp->rsp_len;
+ job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
+ job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
+ } else {
+ job->reply->reply_payload_rcv_len =
+ sizeof(struct fc_bsg_ctels_reply);
+ job->reply_len = sizeof(uint32_t);
+ job->reply->reply_data.ctels_reply.status =
+ FC_CTELS_STATUS_REJECT;
+ }
+
+ /* Copy the response data to the reply_payload sg list */
+ sg_copy_from_buffer(job->reply_payload.sg_list,
+ job->reply_payload.sg_cnt,
+ (uint8_t *)rsp_buf_info->virt,
+ job->reply_payload.payload_len);
+
+out_free_mem:
+ bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
+ drv_fcxp->num_rsp_sgles);
+ bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
+ drv_fcxp->num_req_sgles);
+ kfree(req_kbuf);
+ kfree(rsp_kbuf);
+
+	/* Copy the bsg_fcpt (including its status) back to user space */
+ if (copy_to_user(bsg_data->payload, (void *) bsg_fcpt,
+ bsg_data->payload_len))
+ rc = -EIO;
+
+ kfree(bsg_fcpt);
+ kfree(drv_fcxp);
+out:
+ job->reply->result = rc;
+
+ if (rc == BFA_STATUS_OK)
+ job->job_done(job);
+
+ return rc;
+}
+
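+/*
+ * Top-level bsg request handler: vendor-unique commands and ELS/CT passthru
+ * requests are dispatched to their respective handlers above.
+ */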
+int
+bfad_im_bsg_request(struct fc_bsg_job *job)
+{
+ uint32_t rc = BFA_STATUS_OK;
+
+ switch (job->request->msgcode) {
+ case FC_BSG_HST_VENDOR:
+ /* Process BSG HST Vendor requests */
+ rc = bfad_im_bsg_vendor_request(job);
+ break;
+ case FC_BSG_HST_ELS_NOLOGIN:
+ case FC_BSG_RPT_ELS:
+ case FC_BSG_HST_CT:
+ case FC_BSG_RPT_CT:
+ /* Process BSG ELS/CT commands */
+ rc = bfad_im_bsg_els_ct_request(job);
+ break;
+ default:
+ job->reply->result = rc = -EINVAL;
+ job->reply->reply_payload_rcv_len = 0;
+ break;
+ }
+
+ return rc;
+}
+
+int
+bfad_im_bsg_timeout(struct fc_bsg_job *job)
+{
+	/* Don't complete the BSG job request - return -EAGAIN
+	 * to reset the bsg job timeout; for ELS/CT passthru we
+	 * already have a timer tracking the request.
+	 */
+ return -EAGAIN;
+}
diff --git a/drivers/scsi/bfa/bfad_bsg.h b/drivers/scsi/bfa/bfad_bsg.h
new file mode 100644
index 0000000..e859adb
--- /dev/null
+++ b/drivers/scsi/bfa/bfad_bsg.h
@@ -0,0 +1,746 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef BFAD_BSG_H
+#define BFAD_BSG_H
+
+#include "bfa_defs.h"
+#include "bfa_defs_fcs.h"
+
+/* Definitions of vendor-unique structures and command codes passed in
+ * using the FC_BSG_HST_VENDOR message code.
+ */
+enum {
+ IOCMD_IOC_ENABLE = 0x1,
+ IOCMD_IOC_DISABLE,
+ IOCMD_IOC_GET_ATTR,
+ IOCMD_IOC_GET_INFO,
+ IOCMD_IOC_GET_STATS,
+ IOCMD_IOC_GET_FWSTATS,
+ IOCMD_IOC_RESET_STATS,
+ IOCMD_IOC_RESET_FWSTATS,
+ IOCMD_IOC_SET_ADAPTER_NAME,
+ IOCMD_IOC_SET_PORT_NAME,
+ IOCMD_IOCFC_GET_ATTR,
+ IOCMD_IOCFC_SET_INTR,
+ IOCMD_PORT_ENABLE,
+ IOCMD_PORT_DISABLE,
+ IOCMD_PORT_GET_ATTR,
+ IOCMD_PORT_GET_STATS,
+ IOCMD_PORT_RESET_STATS,
+ IOCMD_PORT_CFG_TOPO,
+ IOCMD_PORT_CFG_SPEED,
+ IOCMD_PORT_CFG_ALPA,
+ IOCMD_PORT_CFG_MAXFRSZ,
+ IOCMD_PORT_CLR_ALPA,
+ IOCMD_PORT_BBSC_ENABLE,
+ IOCMD_PORT_BBSC_DISABLE,
+ IOCMD_LPORT_GET_ATTR,
+ IOCMD_LPORT_GET_RPORTS,
+ IOCMD_LPORT_GET_STATS,
+ IOCMD_LPORT_RESET_STATS,
+ IOCMD_LPORT_GET_IOSTATS,
+ IOCMD_RPORT_GET_ATTR,
+ IOCMD_RPORT_GET_ADDR,
+ IOCMD_RPORT_GET_STATS,
+ IOCMD_RPORT_RESET_STATS,
+ IOCMD_RPORT_SET_SPEED,
+ IOCMD_VPORT_GET_ATTR,
+ IOCMD_VPORT_GET_STATS,
+ IOCMD_VPORT_RESET_STATS,
+ IOCMD_FABRIC_GET_LPORTS,
+ IOCMD_RATELIM_ENABLE,
+ IOCMD_RATELIM_DISABLE,
+ IOCMD_RATELIM_DEF_SPEED,
+ IOCMD_FCPIM_FAILOVER,
+ IOCMD_FCPIM_MODSTATS,
+ IOCMD_FCPIM_MODSTATSCLR,
+ IOCMD_FCPIM_DEL_ITN_STATS,
+ IOCMD_ITNIM_GET_ATTR,
+ IOCMD_ITNIM_GET_IOSTATS,
+ IOCMD_ITNIM_RESET_STATS,
+ IOCMD_ITNIM_GET_ITNSTATS,
+ IOCMD_IOC_PCIFN_CFG,
+ IOCMD_FCPORT_ENABLE,
+ IOCMD_FCPORT_DISABLE,
+ IOCMD_PCIFN_CREATE,
+ IOCMD_PCIFN_DELETE,
+ IOCMD_PCIFN_BW,
+ IOCMD_ADAPTER_CFG_MODE,
+ IOCMD_PORT_CFG_MODE,
+ IOCMD_FLASH_ENABLE_OPTROM,
+ IOCMD_FLASH_DISABLE_OPTROM,
+ IOCMD_FAA_ENABLE,
+ IOCMD_FAA_DISABLE,
+ IOCMD_FAA_QUERY,
+ IOCMD_CEE_GET_ATTR,
+ IOCMD_CEE_GET_STATS,
+ IOCMD_CEE_RESET_STATS,
+ IOCMD_SFP_MEDIA,
+ IOCMD_SFP_SPEED,
+ IOCMD_FLASH_GET_ATTR,
+ IOCMD_FLASH_ERASE_PART,
+ IOCMD_FLASH_UPDATE_PART,
+ IOCMD_FLASH_READ_PART,
+ IOCMD_DIAG_TEMP,
+ IOCMD_DIAG_MEMTEST,
+ IOCMD_DIAG_LOOPBACK,
+ IOCMD_DIAG_FWPING,
+ IOCMD_DIAG_QUEUETEST,
+ IOCMD_DIAG_SFP,
+ IOCMD_DIAG_LED,
+ IOCMD_DIAG_BEACON_LPORT,
+ IOCMD_DIAG_LB_STAT,
+ IOCMD_PHY_GET_ATTR,
+ IOCMD_PHY_GET_STATS,
+ IOCMD_PHY_UPDATE_FW,
+ IOCMD_PHY_READ_FW,
+ IOCMD_VHBA_QUERY,
+ IOCMD_DEBUG_PORTLOG,
+ IOCMD_DEBUG_FW_CORE,
+ IOCMD_DEBUG_FW_STATE_CLR,
+ IOCMD_DEBUG_PORTLOG_CLR,
+ IOCMD_DEBUG_START_DTRC,
+ IOCMD_DEBUG_STOP_DTRC,
+ IOCMD_DEBUG_PORTLOG_CTL,
+ IOCMD_FCPIM_PROFILE_ON,
+ IOCMD_FCPIM_PROFILE_OFF,
+ IOCMD_ITNIM_GET_IOPROFILE,
+ IOCMD_FCPORT_GET_STATS,
+ IOCMD_FCPORT_RESET_STATS,
+ IOCMD_BOOT_CFG,
+ IOCMD_BOOT_QUERY,
+ IOCMD_PREBOOT_QUERY,
+ IOCMD_ETHBOOT_CFG,
+ IOCMD_ETHBOOT_QUERY,
+ IOCMD_TRUNK_ENABLE,
+ IOCMD_TRUNK_DISABLE,
+ IOCMD_TRUNK_GET_ATTR,
+ IOCMD_QOS_ENABLE,
+ IOCMD_QOS_DISABLE,
+ IOCMD_QOS_GET_ATTR,
+ IOCMD_QOS_GET_VC_ATTR,
+ IOCMD_QOS_GET_STATS,
+ IOCMD_QOS_RESET_STATS,
+ IOCMD_VF_GET_STATS,
+ IOCMD_VF_RESET_STATS,
+ IOCMD_FCPIM_LUNMASK_ENABLE,
+ IOCMD_FCPIM_LUNMASK_DISABLE,
+ IOCMD_FCPIM_LUNMASK_CLEAR,
+ IOCMD_FCPIM_LUNMASK_QUERY,
+ IOCMD_FCPIM_LUNMASK_ADD,
+ IOCMD_FCPIM_LUNMASK_DELETE,
+};
+
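+/*
+ * The bsg vendor command structures below start with the driver status and,
+ * in most cases, the bfad instance number, followed by command-specific
+ * fields.
+ */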
+struct bfa_bsg_gen_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+};
+
+struct bfa_bsg_portlogctl_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ bfa_boolean_t ctl;
+ int inst_no;
+};
+
+struct bfa_bsg_fcpim_profile_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+};
+
+struct bfa_bsg_itnim_ioprofile_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t lpwwn;
+ wwn_t rpwwn;
+ struct bfa_itnim_ioprofile_s ioprofile;
+};
+
+struct bfa_bsg_fcport_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ union bfa_fcport_stats_u stats;
+};
+
+struct bfa_bsg_ioc_name_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ char name[BFA_ADAPTER_SYM_NAME_LEN];
+};
+
+struct bfa_bsg_ioc_info_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ char serialnum[64];
+ char hwpath[BFA_STRING_32];
+ char adapter_hwpath[BFA_STRING_32];
+ char guid[BFA_ADAPTER_SYM_NAME_LEN*2];
+ char name[BFA_ADAPTER_SYM_NAME_LEN];
+ char port_name[BFA_ADAPTER_SYM_NAME_LEN];
+ char eth_name[BFA_ADAPTER_SYM_NAME_LEN];
+ wwn_t pwwn;
+ wwn_t nwwn;
+ wwn_t factorypwwn;
+ wwn_t factorynwwn;
+ mac_t mac;
+ mac_t factory_mac; /* Factory mac address */
+ mac_t current_mac; /* Currently assigned mac address */
+ enum bfa_ioc_type_e ioc_type;
+ u16 pvid; /* Port vlan id */
+ u16 rsvd1;
+ u32 host;
+ u32 bandwidth; /* For PF support */
+ u32 rsvd2;
+};
+
+struct bfa_bsg_ioc_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_ioc_attr_s ioc_attr;
+};
+
+struct bfa_bsg_ioc_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_ioc_stats_s ioc_stats;
+};
+
+struct bfa_bsg_ioc_fwstats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 buf_size;
+ u32 rsvd1;
+ u64 buf_ptr;
+};
+
+struct bfa_bsg_iocfc_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_iocfc_attr_s iocfc_attr;
+};
+
+struct bfa_bsg_iocfc_intr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_iocfc_intr_attr_s attr;
+};
+
+struct bfa_bsg_port_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_port_attr_s attr;
+};
+
+struct bfa_bsg_port_cfg_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 param;
+ u32 rsvd1;
+};
+
+struct bfa_bsg_port_cfg_maxfrsize_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 maxfrsize;
+};
+
+struct bfa_bsg_port_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 buf_size;
+ u32 rsvd1;
+ u64 buf_ptr;
+};
+
+struct bfa_bsg_lport_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ struct bfa_lport_attr_s port_attr;
+};
+
+struct bfa_bsg_lport_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ struct bfa_lport_stats_s port_stats;
+};
+
+struct bfa_bsg_lport_iostats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ struct bfa_itnim_iostats_s iostats;
+};
+
+struct bfa_bsg_lport_get_rports_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ u64 rbuf_ptr;
+ u32 nrports;
+ u32 rsvd;
+};
+
+struct bfa_bsg_rport_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ wwn_t rpwwn;
+ struct bfa_rport_attr_s attr;
+};
+
+struct bfa_bsg_rport_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ wwn_t rpwwn;
+ struct bfa_rport_stats_s stats;
+};
+
+struct bfa_bsg_rport_scsi_addr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ wwn_t rpwwn;
+ u32 host;
+ u32 bus;
+ u32 target;
+ u32 lun;
+};
+
+struct bfa_bsg_rport_reset_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ wwn_t rpwwn;
+};
+
+struct bfa_bsg_rport_set_speed_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ enum bfa_port_speed speed;
+ u32 rsvd;
+ wwn_t pwwn;
+ wwn_t rpwwn;
+};
+
+struct bfa_bsg_vport_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t vpwwn;
+ struct bfa_vport_attr_s vport_attr;
+};
+
+struct bfa_bsg_vport_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t vpwwn;
+ struct bfa_vport_stats_s vport_stats;
+};
+
+struct bfa_bsg_reset_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t vpwwn;
+};
+
+struct bfa_bsg_fabric_get_lports_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ u64 buf_ptr;
+ u32 nports;
+ u32 rsvd;
+};
+
+struct bfa_bsg_trl_speed_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ enum bfa_port_speed speed;
+};
+
+struct bfa_bsg_fcpim_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 param;
+};
+
+struct bfa_bsg_fcpim_modstats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ struct bfa_itnim_iostats_s modstats;
+};
+
+struct bfa_bsg_fcpim_del_itn_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ struct bfa_fcpim_del_itn_stats_s modstats;
+};
+
+struct bfa_bsg_fcpim_modstatsclr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+};
+
+struct bfa_bsg_itnim_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t lpwwn;
+ wwn_t rpwwn;
+ struct bfa_itnim_attr_s attr;
+};
+
+struct bfa_bsg_itnim_iostats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t lpwwn;
+ wwn_t rpwwn;
+ struct bfa_itnim_iostats_s iostats;
+};
+
+struct bfa_bsg_itnim_itnstats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t lpwwn;
+ wwn_t rpwwn;
+ struct bfa_itnim_stats_s itnstats;
+};
+
+struct bfa_bsg_pcifn_cfg_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_ablk_cfg_s pcifn_cfg;
+};
+
+struct bfa_bsg_pcifn_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 pcifn_id;
+ u32 bandwidth;
+ u8 port;
+ enum bfi_pcifn_class pcifn_class;
+ u8 rsvd[1];
+};
+
+struct bfa_bsg_adapter_cfg_mode_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_adapter_cfg_mode_s cfg;
+};
+
+struct bfa_bsg_port_cfg_mode_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 instance;
+ struct bfa_port_cfg_mode_s cfg;
+};
+
+struct bfa_bsg_faa_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_faa_attr_s faa_attr;
+};
+
+struct bfa_bsg_cee_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 buf_size;
+ u32 rsvd1;
+ u64 buf_ptr;
+};
+
+struct bfa_bsg_cee_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 buf_size;
+ u32 rsvd1;
+ u64 buf_ptr;
+};
+
+struct bfa_bsg_sfp_media_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ enum bfa_defs_sfp_media_e media;
+};
+
+struct bfa_bsg_sfp_speed_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ enum bfa_port_speed speed;
+};
+
+struct bfa_bsg_flash_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_flash_attr_s attr;
+};
+
+struct bfa_bsg_flash_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u8 instance;
+ u8 rsvd;
+ enum bfa_flash_part_type type;
+ int bufsz;
+ u64 buf_ptr;
+};
+
+struct bfa_bsg_diag_get_temp_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_diag_results_tempsensor_s result;
+};
+
+struct bfa_bsg_diag_memtest_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd[3];
+ u32 pat;
+ struct bfa_diag_memtest_result result;
+ struct bfa_diag_memtest_s memtest;
+};
+
+struct bfa_bsg_diag_loopback_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ enum bfa_port_opmode opmode;
+ enum bfa_port_speed speed;
+ u32 lpcnt;
+ u32 pat;
+ struct bfa_diag_loopback_result_s result;
+};
+
+struct bfa_bsg_diag_fwping_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 cnt;
+ u32 pattern;
+ struct bfa_diag_results_fwping result;
+};
+
+struct bfa_bsg_diag_qtest_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 force;
+ u32 queue;
+ struct bfa_diag_qtest_result_s result;
+};
+
+struct bfa_bsg_sfp_show_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct sfp_mem_s sfp;
+};
+
+struct bfa_bsg_diag_led_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_diag_ledtest_s ledtest;
+};
+
+struct bfa_bsg_diag_beacon_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ bfa_boolean_t beacon;
+ bfa_boolean_t link_e2e_beacon;
+ u32 second;
+};
+
+struct bfa_bsg_diag_lb_stat_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+};
+
+struct bfa_bsg_phy_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 instance;
+ struct bfa_phy_attr_s attr;
+};
+
+struct bfa_bsg_phy_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 instance;
+ u64 bufsz;
+ u64 buf_ptr;
+};
+
+struct bfa_bsg_debug_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ u32 bufsz;
+ int inst_no;
+ u64 buf_ptr;
+ u64 offset;
+};
+
+struct bfa_bsg_phy_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 instance;
+ struct bfa_phy_stats_s stats;
+};
+
+struct bfa_bsg_vhba_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 pcifn_id;
+ struct bfa_vhba_attr_s attr;
+};
+
+struct bfa_bsg_boot_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_boot_cfg_s cfg;
+};
+
+struct bfa_bsg_preboot_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_boot_pbc_s cfg;
+};
+
+struct bfa_bsg_ethboot_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_ethboot_cfg_s cfg;
+};
+
+struct bfa_bsg_trunk_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_trunk_attr_s attr;
+};
+
+struct bfa_bsg_qos_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_qos_attr_s attr;
+};
+
+struct bfa_bsg_qos_vc_attr_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 rsvd;
+ struct bfa_qos_vc_attr_s attr;
+};
+
+struct bfa_bsg_vf_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ struct bfa_vf_stats_s stats;
+};
+
+struct bfa_bsg_vf_reset_stats_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+};
+
+struct bfa_bsg_fcpim_lunmask_query_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ struct bfa_lunmask_cfg_s lun_mask;
+};
+
+struct bfa_bsg_fcpim_lunmask_s {
+ bfa_status_t status;
+ u16 bfad_num;
+ u16 vf_id;
+ wwn_t pwwn;
+ wwn_t rpwwn;
+ struct scsi_lun lun;
+};
+
+struct bfa_bsg_fcpt_s {
+ bfa_status_t status;
+ u16 vf_id;
+ wwn_t lpwwn;
+ wwn_t dpwwn;
+ u32 tsecs;
+ int cts;
+ enum fc_cos cos;
+ struct fchs_s fchs;
+};
+#define bfa_bsg_fcpt_t struct bfa_bsg_fcpt_s
+
+struct bfa_bsg_data {
+ int payload_len;
+ void *payload;
+};
+
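+/*
+ * Validate that the bsg payload length is exactly the size of the command
+ * header plus the expected data buffer.
+ */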
+#define bfad_chk_iocmd_sz(__payload_len, __hdrsz, __bufsz) \
+ (((__payload_len) != ((__hdrsz) + (__bufsz))) ? \
+ BFA_STATUS_FAILED : BFA_STATUS_OK)
+
+#endif /* BFAD_BSG_H */
diff --git a/drivers/scsi/bfa/bfi_reg.h b/drivers/scsi/bfa/bfi_reg.h
new file mode 100644
index 0000000..d892064
--- /dev/null
+++ b/drivers/scsi/bfa/bfi_reg.h
@@ -0,0 +1,450 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ * bfi_reg.h ASIC register defines for all Brocade adapter ASICs
+ */
+
+#ifndef __BFI_REG_H__
+#define __BFI_REG_H__
+
+#define HOSTFN0_INT_STATUS 0x00014000 /* cb/ct */
+#define HOSTFN1_INT_STATUS 0x00014100 /* cb/ct */
+#define HOSTFN2_INT_STATUS 0x00014300 /* ct */
+#define HOSTFN3_INT_STATUS 0x00014400 /* ct */
+#define HOSTFN0_INT_MSK 0x00014004 /* cb/ct */
+#define HOSTFN1_INT_MSK 0x00014104 /* cb/ct */
+#define HOSTFN2_INT_MSK 0x00014304 /* ct */
+#define HOSTFN3_INT_MSK 0x00014404 /* ct */
+
+#define HOST_PAGE_NUM_FN0 0x00014008 /* cb/ct */
+#define HOST_PAGE_NUM_FN1 0x00014108 /* cb/ct */
+#define HOST_PAGE_NUM_FN2 0x00014308 /* ct */
+#define HOST_PAGE_NUM_FN3 0x00014408 /* ct */
+
+#define APP_PLL_LCLK_CTL_REG 0x00014204 /* cb/ct */
+#define __P_LCLK_PLL_LOCK 0x80000000
+#define __APP_PLL_LCLK_SRAM_USE_100MHZ 0x00100000
+#define __APP_PLL_LCLK_RESET_TIMER_MK 0x000e0000
+#define __APP_PLL_LCLK_RESET_TIMER_SH 17
+#define __APP_PLL_LCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_LCLK_RESET_TIMER_SH)
+#define __APP_PLL_LCLK_LOGIC_SOFT_RESET 0x00010000
+#define __APP_PLL_LCLK_CNTLMT0_1_MK 0x0000c000
+#define __APP_PLL_LCLK_CNTLMT0_1_SH 14
+#define __APP_PLL_LCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_LCLK_CNTLMT0_1_SH)
+#define __APP_PLL_LCLK_JITLMT0_1_MK 0x00003000
+#define __APP_PLL_LCLK_JITLMT0_1_SH 12
+#define __APP_PLL_LCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_LCLK_JITLMT0_1_SH)
+#define __APP_PLL_LCLK_HREF 0x00000800
+#define __APP_PLL_LCLK_HDIV 0x00000400
+#define __APP_PLL_LCLK_P0_1_MK 0x00000300
+#define __APP_PLL_LCLK_P0_1_SH 8
+#define __APP_PLL_LCLK_P0_1(_v) ((_v) << __APP_PLL_LCLK_P0_1_SH)
+#define __APP_PLL_LCLK_Z0_2_MK 0x000000e0
+#define __APP_PLL_LCLK_Z0_2_SH 5
+#define __APP_PLL_LCLK_Z0_2(_v) ((_v) << __APP_PLL_LCLK_Z0_2_SH)
+#define __APP_PLL_LCLK_RSEL200500 0x00000010
+#define __APP_PLL_LCLK_ENARST 0x00000008
+#define __APP_PLL_LCLK_BYPASS 0x00000004
+#define __APP_PLL_LCLK_LRESETN 0x00000002
+#define __APP_PLL_LCLK_ENABLE 0x00000001
+#define APP_PLL_SCLK_CTL_REG 0x00014208 /* cb/ct */
+#define __P_SCLK_PLL_LOCK 0x80000000
+#define __APP_PLL_SCLK_RESET_TIMER_MK 0x000e0000
+#define __APP_PLL_SCLK_RESET_TIMER_SH 17
+#define __APP_PLL_SCLK_RESET_TIMER(_v) ((_v) << __APP_PLL_SCLK_RESET_TIMER_SH)
+#define __APP_PLL_SCLK_LOGIC_SOFT_RESET 0x00010000
+#define __APP_PLL_SCLK_CNTLMT0_1_MK 0x0000c000
+#define __APP_PLL_SCLK_CNTLMT0_1_SH 14
+#define __APP_PLL_SCLK_CNTLMT0_1(_v) ((_v) << __APP_PLL_SCLK_CNTLMT0_1_SH)
+#define __APP_PLL_SCLK_JITLMT0_1_MK 0x00003000
+#define __APP_PLL_SCLK_JITLMT0_1_SH 12
+#define __APP_PLL_SCLK_JITLMT0_1(_v) ((_v) << __APP_PLL_SCLK_JITLMT0_1_SH)
+#define __APP_PLL_SCLK_HREF 0x00000800
+#define __APP_PLL_SCLK_HDIV 0x00000400
+#define __APP_PLL_SCLK_P0_1_MK 0x00000300
+#define __APP_PLL_SCLK_P0_1_SH 8
+#define __APP_PLL_SCLK_P0_1(_v) ((_v) << __APP_PLL_SCLK_P0_1_SH)
+#define __APP_PLL_SCLK_Z0_2_MK 0x000000e0
+#define __APP_PLL_SCLK_Z0_2_SH 5
+#define __APP_PLL_SCLK_Z0_2(_v) ((_v) << __APP_PLL_SCLK_Z0_2_SH)
+#define __APP_PLL_SCLK_RSEL200500 0x00000010
+#define __APP_PLL_SCLK_ENARST 0x00000008
+#define __APP_PLL_SCLK_BYPASS 0x00000004
+#define __APP_PLL_SCLK_LRESETN 0x00000002
+#define __APP_PLL_SCLK_ENABLE 0x00000001
+#define __ENABLE_MAC_AHB_1 0x00800000 /* ct */
+#define __ENABLE_MAC_AHB_0 0x00400000 /* ct */
+#define __ENABLE_MAC_1 0x00200000 /* ct */
+#define __ENABLE_MAC_0 0x00100000 /* ct */
+
+#define HOST_SEM0_REG 0x00014230 /* cb/ct */
+#define HOST_SEM1_REG 0x00014234 /* cb/ct */
+#define HOST_SEM2_REG 0x00014238 /* cb/ct */
+#define HOST_SEM3_REG 0x0001423c /* cb/ct */
+#define HOST_SEM4_REG 0x00014610 /* cb/ct */
+#define HOST_SEM5_REG 0x00014614 /* cb/ct */
+#define HOST_SEM6_REG 0x00014618 /* cb/ct */
+#define HOST_SEM7_REG 0x0001461c /* cb/ct */
+#define HOST_SEM0_INFO_REG 0x00014240 /* cb/ct */
+#define HOST_SEM1_INFO_REG 0x00014244 /* cb/ct */
+#define HOST_SEM2_INFO_REG 0x00014248 /* cb/ct */
+#define HOST_SEM3_INFO_REG 0x0001424c /* cb/ct */
+#define HOST_SEM4_INFO_REG 0x00014620 /* cb/ct */
+#define HOST_SEM5_INFO_REG 0x00014624 /* cb/ct */
+#define HOST_SEM6_INFO_REG 0x00014628 /* cb/ct */
+#define HOST_SEM7_INFO_REG 0x0001462c /* cb/ct */
+
+#define HOSTFN0_LPU0_CMD_STAT 0x00019000 /* cb/ct */
+#define HOSTFN0_LPU1_CMD_STAT 0x00019004 /* cb/ct */
+#define HOSTFN1_LPU0_CMD_STAT 0x00019010 /* cb/ct */
+#define HOSTFN1_LPU1_CMD_STAT 0x00019014 /* cb/ct */
+#define HOSTFN2_LPU0_CMD_STAT 0x00019150 /* ct */
+#define HOSTFN2_LPU1_CMD_STAT 0x00019154 /* ct */
+#define HOSTFN3_LPU0_CMD_STAT 0x00019160 /* ct */
+#define HOSTFN3_LPU1_CMD_STAT 0x00019164 /* ct */
+#define LPU0_HOSTFN0_CMD_STAT 0x00019008 /* cb/ct */
+#define LPU1_HOSTFN0_CMD_STAT 0x0001900c /* cb/ct */
+#define LPU0_HOSTFN1_CMD_STAT 0x00019018 /* cb/ct */
+#define LPU1_HOSTFN1_CMD_STAT 0x0001901c /* cb/ct */
+#define LPU0_HOSTFN2_CMD_STAT 0x00019158 /* ct */
+#define LPU1_HOSTFN2_CMD_STAT 0x0001915c /* ct */
+#define LPU0_HOSTFN3_CMD_STAT 0x00019168 /* ct */
+#define LPU1_HOSTFN3_CMD_STAT 0x0001916c /* ct */
+
+#define PSS_CTL_REG 0x00018800 /* cb/ct */
+#define __PSS_I2C_CLK_DIV_MK 0x007f0000
+#define __PSS_I2C_CLK_DIV_SH 16
+#define __PSS_I2C_CLK_DIV(_v) ((_v) << __PSS_I2C_CLK_DIV_SH)
+#define __PSS_LMEM_INIT_DONE 0x00001000
+#define __PSS_LMEM_RESET 0x00000200
+#define __PSS_LMEM_INIT_EN 0x00000100
+#define __PSS_LPU1_RESET 0x00000002
+#define __PSS_LPU0_RESET 0x00000001
+#define PSS_ERR_STATUS_REG 0x00018810 /* cb/ct */
+#define ERR_SET_REG 0x00018818 /* cb/ct */
+#define PSS_GPIO_OUT_REG 0x000188c0 /* cb/ct */
+#define __PSS_GPIO_OUT_REG 0x00000fff
+#define PSS_GPIO_OE_REG 0x000188c8 /* cb/ct */
+#define __PSS_GPIO_OE_REG 0x000000ff
+
+#define HOSTFN0_LPU_MBOX0_0 0x00019200 /* cb/ct */
+#define HOSTFN1_LPU_MBOX0_8 0x00019260 /* cb/ct */
+#define LPU_HOSTFN0_MBOX0_0 0x00019280 /* cb/ct */
+#define LPU_HOSTFN1_MBOX0_8 0x000192e0 /* cb/ct */
+#define HOSTFN2_LPU_MBOX0_0 0x00019400 /* ct */
+#define HOSTFN3_LPU_MBOX0_8 0x00019460 /* ct */
+#define LPU_HOSTFN2_MBOX0_0 0x00019480 /* ct */
+#define LPU_HOSTFN3_MBOX0_8 0x000194e0 /* ct */
+
+#define HOST_MSIX_ERR_INDEX_FN0 0x0001400c /* ct */
+#define HOST_MSIX_ERR_INDEX_FN1 0x0001410c /* ct */
+#define HOST_MSIX_ERR_INDEX_FN2 0x0001430c /* ct */
+#define HOST_MSIX_ERR_INDEX_FN3 0x0001440c /* ct */
+
+#define MBIST_CTL_REG 0x00014220 /* ct */
+#define __EDRAM_BISTR_START 0x00000004
+#define MBIST_STAT_REG 0x00014224 /* ct */
+#define ETH_MAC_SER_REG 0x00014288 /* ct */
+#define __APP_EMS_CKBUFAMPIN 0x00000020
+#define __APP_EMS_REFCLKSEL 0x00000010
+#define __APP_EMS_CMLCKSEL 0x00000008
+#define __APP_EMS_REFCKBUFEN2 0x00000004
+#define __APP_EMS_REFCKBUFEN1 0x00000002
+#define __APP_EMS_CHANNEL_SEL 0x00000001
+#define FNC_PERS_REG 0x00014604 /* ct */
+#define __F3_FUNCTION_ACTIVE 0x80000000
+#define __F3_FUNCTION_MODE 0x40000000
+#define __F3_PORT_MAP_MK 0x30000000
+#define __F3_PORT_MAP_SH 28
+#define __F3_PORT_MAP(_v) ((_v) << __F3_PORT_MAP_SH)
+#define __F3_VM_MODE 0x08000000
+#define __F3_INTX_STATUS_MK 0x07000000
+#define __F3_INTX_STATUS_SH 24
+#define __F3_INTX_STATUS(_v) ((_v) << __F3_INTX_STATUS_SH)
+#define __F2_FUNCTION_ACTIVE 0x00800000
+#define __F2_FUNCTION_MODE 0x00400000
+#define __F2_PORT_MAP_MK 0x00300000
+#define __F2_PORT_MAP_SH 20
+#define __F2_PORT_MAP(_v) ((_v) << __F2_PORT_MAP_SH)
+#define __F2_VM_MODE 0x00080000
+#define __F2_INTX_STATUS_MK 0x00070000
+#define __F2_INTX_STATUS_SH 16
+#define __F2_INTX_STATUS(_v) ((_v) << __F2_INTX_STATUS_SH)
+#define __F1_FUNCTION_ACTIVE 0x00008000
+#define __F1_FUNCTION_MODE 0x00004000
+#define __F1_PORT_MAP_MK 0x00003000
+#define __F1_PORT_MAP_SH 12
+#define __F1_PORT_MAP(_v) ((_v) << __F1_PORT_MAP_SH)
+#define __F1_VM_MODE 0x00000800
+#define __F1_INTX_STATUS_MK 0x00000700
+#define __F1_INTX_STATUS_SH 8
+#define __F1_INTX_STATUS(_v) ((_v) << __F1_INTX_STATUS_SH)
+#define __F0_FUNCTION_ACTIVE 0x00000080
+#define __F0_FUNCTION_MODE 0x00000040
+#define __F0_PORT_MAP_MK 0x00000030
+#define __F0_PORT_MAP_SH 4
+#define __F0_PORT_MAP(_v) ((_v) << __F0_PORT_MAP_SH)
+#define __F0_VM_MODE 0x00000008
+#define __F0_INTX_STATUS 0x00000007
+enum {
+ __F0_INTX_STATUS_MSIX = 0x0,
+ __F0_INTX_STATUS_INTA = 0x1,
+ __F0_INTX_STATUS_INTB = 0x2,
+ __F0_INTX_STATUS_INTC = 0x3,
+ __F0_INTX_STATUS_INTD = 0x4,
+};
+
+#define OP_MODE 0x0001460c /* ct */
+#define __APP_ETH_CLK_LOWSPEED 0x00000004
+#define __GLOBAL_CORECLK_HALFSPEED 0x00000002
+#define __GLOBAL_FCOE_MODE 0x00000001
+#define FW_INIT_HALT_P0 0x000191ac /* ct */
+#define __FW_INIT_HALT_P 0x00000001
+#define FW_INIT_HALT_P1 0x000191bc /* ct */
+#define PMM_1T_RESET_REG_P0 0x0002381c /* ct */
+#define __PMM_1T_RESET_P 0x00000001
+#define PMM_1T_RESET_REG_P1 0x00023c1c /* ct */
+
+/**
+ * Catapult-2 specific defines
+ */
+#define CT2_PCI_CPQ_BASE 0x00030000
+#define CT2_PCI_APP_BASE 0x00030100
+#define CT2_PCI_ETH_BASE 0x00030400
+
+/*
+ * APP block registers
+ */
+#define CT2_HOSTFN_INT_STATUS (CT2_PCI_APP_BASE + 0x00)
+#define CT2_HOSTFN_INTR_MASK (CT2_PCI_APP_BASE + 0x04)
+#define CT2_HOSTFN_PERSONALITY0 (CT2_PCI_APP_BASE + 0x08)
+#define __PME_STATUS_ 0x00200000
+#define __PF_VF_BAR_SIZE_MODE__MK 0x00180000
+#define __PF_VF_BAR_SIZE_MODE__SH 19
+#define __PF_VF_BAR_SIZE_MODE_(_v) ((_v) << __PF_VF_BAR_SIZE_MODE__SH)
+#define __FC_LL_PORT_MAP__MK 0x00060000
+#define __FC_LL_PORT_MAP__SH 17
+#define __FC_LL_PORT_MAP_(_v) ((_v) << __FC_LL_PORT_MAP__SH)
+#define __PF_VF_ACTIVE_ 0x00010000
+#define __PF_VF_CFG_RDY_ 0x00008000
+#define __PF_VF_ENABLE_ 0x00004000
+#define __PF_DRIVER_ACTIVE_ 0x00002000
+#define __PF_PME_SEND_ENABLE_ 0x00001000
+#define __PF_EXROM_OFFSET__MK 0x00000ff0
+#define __PF_EXROM_OFFSET__SH 4
+#define __PF_EXROM_OFFSET_(_v) ((_v) << __PF_EXROM_OFFSET__SH)
+#define __FC_LL_MODE_ 0x00000008
+#define __PF_INTX_PIN_ 0x00000007
+#define CT2_HOSTFN_PERSONALITY1 (CT2_PCI_APP_BASE + 0x0C)
+#define __PF_NUM_QUEUES1__MK 0xff000000
+#define __PF_NUM_QUEUES1__SH 24
+#define __PF_NUM_QUEUES1_(_v) ((_v) << __PF_NUM_QUEUES1__SH)
+#define __PF_VF_QUE_OFFSET1__MK 0x00ff0000
+#define __PF_VF_QUE_OFFSET1__SH 16
+#define __PF_VF_QUE_OFFSET1_(_v) ((_v) << __PF_VF_QUE_OFFSET1__SH)
+#define __PF_VF_NUM_QUEUES__MK 0x0000ff00
+#define __PF_VF_NUM_QUEUES__SH 8
+#define __PF_VF_NUM_QUEUES_(_v) ((_v) << __PF_VF_NUM_QUEUES__SH)
+#define __PF_VF_QUE_OFFSET_ 0x000000ff
+#define CT2_HOSTFN_PAGE_NUM (CT2_PCI_APP_BASE + 0x18)
+#define CT2_HOSTFN_MSIX_VT_INDEX_MBOX_ERR (CT2_PCI_APP_BASE + 0x38)
+
+/*
+ * Catapult-2 CPQ block registers
+ */
+#define CT2_HOSTFN_LPU0_MBOX0 (CT2_PCI_CPQ_BASE + 0x00)
+#define CT2_HOSTFN_LPU1_MBOX0 (CT2_PCI_CPQ_BASE + 0x20)
+#define CT2_LPU0_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x40)
+#define CT2_LPU1_HOSTFN_MBOX0 (CT2_PCI_CPQ_BASE + 0x60)
+#define CT2_HOSTFN_LPU0_CMD_STAT (CT2_PCI_CPQ_BASE + 0x80)
+#define CT2_HOSTFN_LPU1_CMD_STAT (CT2_PCI_CPQ_BASE + 0x84)
+#define CT2_LPU0_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x88)
+#define CT2_LPU1_HOSTFN_CMD_STAT (CT2_PCI_CPQ_BASE + 0x8c)
+#define CT2_HOSTFN_LPU0_READ_STAT (CT2_PCI_CPQ_BASE + 0x90)
+#define CT2_HOSTFN_LPU1_READ_STAT (CT2_PCI_CPQ_BASE + 0x94)
+#define CT2_LPU0_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x98)
+#define CT2_LPU1_HOSTFN_MBOX0_MSK (CT2_PCI_CPQ_BASE + 0x9C)
+#define CT2_HOST_SEM0_REG 0x000148f0
+#define CT2_HOST_SEM1_REG 0x000148f4
+#define CT2_HOST_SEM2_REG 0x000148f8
+#define CT2_HOST_SEM3_REG 0x000148fc
+#define CT2_HOST_SEM4_REG 0x00014900
+#define CT2_HOST_SEM5_REG 0x00014904
+#define CT2_HOST_SEM6_REG 0x00014908
+#define CT2_HOST_SEM7_REG 0x0001490c
+#define CT2_HOST_SEM0_INFO_REG 0x000148b0
+#define CT2_HOST_SEM1_INFO_REG 0x000148b4
+#define CT2_HOST_SEM2_INFO_REG 0x000148b8
+#define CT2_HOST_SEM3_INFO_REG 0x000148bc
+#define CT2_HOST_SEM4_INFO_REG 0x000148c0
+#define CT2_HOST_SEM5_INFO_REG 0x000148c4
+#define CT2_HOST_SEM6_INFO_REG 0x000148c8
+#define CT2_HOST_SEM7_INFO_REG 0x000148cc
+
+#define CT2_APP_PLL_LCLK_CTL_REG 0x00014808
+#define __APP_LPUCLK_HALFSPEED 0x40000000
+#define __APP_PLL_LCLK_LOAD 0x20000000
+#define __APP_PLL_LCLK_FBCNT_MK 0x1fe00000
+#define __APP_PLL_LCLK_FBCNT_SH 21
+#define __APP_PLL_LCLK_FBCNT(_v) ((_v) << __APP_PLL_SCLK_FBCNT_SH)
+enum {
+ __APP_PLL_LCLK_FBCNT_425_MHZ = 6,
+ __APP_PLL_LCLK_FBCNT_468_MHZ = 4,
+};
+#define __APP_PLL_LCLK_EXTFB 0x00000800
+#define __APP_PLL_LCLK_ENOUTS 0x00000400
+#define __APP_PLL_LCLK_RATE 0x00000010
+#define CT2_APP_PLL_SCLK_CTL_REG 0x0001480c
+#define __P_SCLK_PLL_LOCK 0x80000000
+#define __APP_PLL_SCLK_REFCLK_SEL 0x40000000
+#define __APP_PLL_SCLK_CLK_DIV2 0x20000000
+#define __APP_PLL_SCLK_LOAD 0x10000000
+#define __APP_PLL_SCLK_FBCNT_MK 0x0ff00000
+#define __APP_PLL_SCLK_FBCNT_SH 20
+#define __APP_PLL_SCLK_FBCNT(_v) ((_v) << __APP_PLL_SCLK_FBCNT_SH)
+enum {
+ __APP_PLL_SCLK_FBCNT_NORM = 6,
+ __APP_PLL_SCLK_FBCNT_10G_FC = 10,
+};
+#define __APP_PLL_SCLK_EXTFB 0x00000800
+#define __APP_PLL_SCLK_ENOUTS 0x00000400
+#define __APP_PLL_SCLK_RATE 0x00000010
+#define CT2_PCIE_MISC_REG 0x00014804
+#define __ETH_CLK_ENABLE_PORT1 0x00000010
+#define CT2_CHIP_MISC_PRG 0x000148a4
+#define __ETH_CLK_ENABLE_PORT0 0x00004000
+#define __APP_LPU_SPEED 0x00000002
+#define CT2_MBIST_STAT_REG 0x00014818
+#define CT2_MBIST_CTL_REG 0x0001481c
+#define CT2_PMM_1T_CONTROL_REG_P0 0x0002381c
+#define __PMM_1T_PNDB_P 0x00000002
+#define CT2_PMM_1T_CONTROL_REG_P1 0x00023c1c
+#define CT2_WGN_STATUS 0x00014990
+#define __WGN_READY 0x00000400
+#define __GLBL_PF_VF_CFG_RDY 0x00000200
+#define CT2_NFC_CSR_SET_REG 0x00027424
+#define __HALT_NFC_CONTROLLER 0x00000002
+#define __NFC_CONTROLLER_HALTED 0x00001000
+
+#define CT2_CSI_MAC0_CONTROL_REG 0x000270d0
+#define __CSI_MAC_RESET 0x00000010
+#define __CSI_MAC_AHB_RESET 0x00000008
+#define CT2_CSI_MAC1_CONTROL_REG 0x000270d4
+#define CT2_CSI_MAC_CONTROL_REG(__n) \
+ (CT2_CSI_MAC0_CONTROL_REG + \
+ (__n) * (CT2_CSI_MAC1_CONTROL_REG - CT2_CSI_MAC0_CONTROL_REG))
+
+/*
+ * Name semaphore registers based on usage
+ */
+#define BFA_IOC0_HBEAT_REG HOST_SEM0_INFO_REG
+#define BFA_IOC0_STATE_REG HOST_SEM1_INFO_REG
+#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
+#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
+#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
+#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG
+
+/*
+ * CT2 semaphore register locations changed
+ */
+#define CT2_BFA_IOC0_HBEAT_REG CT2_HOST_SEM0_INFO_REG
+#define CT2_BFA_IOC0_STATE_REG CT2_HOST_SEM1_INFO_REG
+#define CT2_BFA_IOC1_HBEAT_REG CT2_HOST_SEM2_INFO_REG
+#define CT2_BFA_IOC1_STATE_REG CT2_HOST_SEM3_INFO_REG
+#define CT2_BFA_FW_USE_COUNT CT2_HOST_SEM4_INFO_REG
+#define CT2_BFA_IOC_FAIL_SYNC CT2_HOST_SEM5_INFO_REG
+
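+/*
+ * Each PCI function owns four CPE/RME queue pairs; these macros map a
+ * (function, queue) pair to its global queue number.
+ */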
+#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
+#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
+
+/*
+ * Corresponding host interrupt status bit field defines
+ */
+#define __HFN_INT_CPE_Q0 0x00000001U
+#define __HFN_INT_CPE_Q1 0x00000002U
+#define __HFN_INT_CPE_Q2 0x00000004U
+#define __HFN_INT_CPE_Q3 0x00000008U
+#define __HFN_INT_CPE_Q4 0x00000010U
+#define __HFN_INT_CPE_Q5 0x00000020U
+#define __HFN_INT_CPE_Q6 0x00000040U
+#define __HFN_INT_CPE_Q7 0x00000080U
+#define __HFN_INT_RME_Q0 0x00000100U
+#define __HFN_INT_RME_Q1 0x00000200U
+#define __HFN_INT_RME_Q2 0x00000400U
+#define __HFN_INT_RME_Q3 0x00000800U
+#define __HFN_INT_RME_Q4 0x00001000U
+#define __HFN_INT_RME_Q5 0x00002000U
+#define __HFN_INT_RME_Q6 0x00004000U
+#define __HFN_INT_RME_Q7 0x00008000U
+#define __HFN_INT_ERR_EMC 0x00010000U
+#define __HFN_INT_ERR_LPU0 0x00020000U
+#define __HFN_INT_ERR_LPU1 0x00040000U
+#define __HFN_INT_ERR_PSS 0x00080000U
+#define __HFN_INT_MBOX_LPU0 0x00100000U
+#define __HFN_INT_MBOX_LPU1 0x00200000U
+#define __HFN_INT_MBOX1_LPU0 0x00400000U
+#define __HFN_INT_MBOX1_LPU1 0x00800000U
+#define __HFN_INT_LL_HALT 0x01000000U
+#define __HFN_INT_CPE_MASK 0x000000ffU
+#define __HFN_INT_RME_MASK 0x0000ff00U
+#define __HFN_INT_ERR_MASK \
+ (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 | \
+ __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT)
+#define __HFN_INT_FN0_MASK \
+ (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
+ __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
+ __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0)
+#define __HFN_INT_FN1_MASK \
+ (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
+ __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
+ __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1)
+
+/*
+ * Host interrupt status defines for catapult-2
+ */
+#define __HFN_INT_MBOX_LPU0_CT2 0x00010000U
+#define __HFN_INT_MBOX_LPU1_CT2 0x00020000U
+#define __HFN_INT_ERR_PSS_CT2 0x00040000U
+#define __HFN_INT_ERR_LPU0_CT2 0x00080000U
+#define __HFN_INT_ERR_LPU1_CT2 0x00100000U
+#define __HFN_INT_CPQ_HALT_CT2 0x00200000U
+#define __HFN_INT_ERR_WGN_CT2 0x00400000U
+#define __HFN_INT_ERR_LEHRX_CT2 0x00800000U
+#define __HFN_INT_ERR_LEHTX_CT2 0x01000000U
+#define __HFN_INT_ERR_MASK_CT2 \
+ (__HFN_INT_ERR_PSS_CT2 | __HFN_INT_ERR_LPU0_CT2 | \
+ __HFN_INT_ERR_LPU1_CT2 | __HFN_INT_CPQ_HALT_CT2 | \
+ __HFN_INT_ERR_WGN_CT2 | __HFN_INT_ERR_LEHRX_CT2 | \
+ __HFN_INT_ERR_LEHTX_CT2)
+#define __HFN_INT_FN0_MASK_CT2 \
+ (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 | __HFN_INT_CPE_Q2 | \
+ __HFN_INT_CPE_Q3 | __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 | \
+ __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 | __HFN_INT_MBOX_LPU0_CT2)
+#define __HFN_INT_FN1_MASK_CT2 \
+ (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 | __HFN_INT_CPE_Q6 | \
+ __HFN_INT_CPE_Q7 | __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 | \
+ __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 | __HFN_INT_MBOX_LPU1_CT2)
+
+/*
+ * ASIC memory map.
+ */
+#define PSS_SMEM_PAGE_START 0x8000
+#define PSS_SMEM_PGNUM(_pg0, _ma) ((_pg0) + ((_ma) >> 15))
+#define PSS_SMEM_PGOFF(_ma) ((_ma) & 0x7fff)
+
+#endif /* __BFI_REG_H__ */
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
new file mode 100644
index 0000000..88cf1db
--- /dev/null
+++ b/drivers/scsi/mvumi.c
@@ -0,0 +1,2018 @@
+/*
+ * Marvell UMI driver
+ *
+ * Copyright 2011 Marvell. <jyli@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/blkdev.h>
+#include <linux/io.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport.h>
+#include <scsi/scsi_eh.h>
+#include <linux/uaccess.h>
+
+#include "mvumi.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("jyli@marvell.com");
+MODULE_DESCRIPTION("Marvell UMI Driver");
+
+static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9143) },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, mvumi_pci_table);
+
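+/*
+ * The tag pool is a simple LIFO stack of free tags: tag_init() fills the
+ * stack, tag_get_one() pops a free tag and tag_release_one() pushes one back.
+ */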
+static void tag_init(struct mvumi_tag *st, unsigned short size)
+{
+ unsigned short i;
+ BUG_ON(size != st->size);
+ st->top = size;
+ for (i = 0; i < size; i++)
+ st->stack[i] = size - 1 - i;
+}
+
+static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
+{
+ BUG_ON(st->top <= 0);
+ return st->stack[--st->top];
+}
+
+static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
+ unsigned short tag)
+{
+ BUG_ON(st->top >= st->size);
+ st->stack[st->top++] = tag;
+}
+
+static bool tag_is_empty(struct mvumi_tag *st)
+{
+ return st->top == 0;
+}
+
+static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
+{
+ int i;
+
+ for (i = 0; i < MAX_BASE_ADDRESS; i++)
+ if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
+ addr_array[i])
+ pci_iounmap(dev, addr_array[i]);
+}
+
+static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
+{
+ int i;
+
+ for (i = 0; i < MAX_BASE_ADDRESS; i++) {
+ if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
+ addr_array[i] = pci_iomap(dev, i, 0);
+ if (!addr_array[i]) {
+ dev_err(&dev->dev, "failed to map Bar[%d]\n",
+ i);
+ mvumi_unmap_pci_addr(dev, addr_array);
+ return -ENOMEM;
+ }
+ } else
+ addr_array[i] = NULL;
+
+ dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
+ }
+
+ return 0;
+}
+
+static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
+ enum resource_type type, unsigned int size)
+{
+ struct mvumi_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
+
+ if (!res) {
+ dev_err(&mhba->pdev->dev,
+ "Failed to allocate memory for resouce manager.\n");
+ return NULL;
+ }
+
+ switch (type) {
+ case RESOURCE_CACHED_MEMORY:
+ res->virt_addr = kzalloc(size, GFP_KERNEL);
+ if (!res->virt_addr) {
+ dev_err(&mhba->pdev->dev,
+ "unable to allocate memory,size = %d.\n", size);
+ kfree(res);
+ return NULL;
+ }
+ break;
+
+ case RESOURCE_UNCACHED_MEMORY:
+ size = round_up(size, 8);
+ res->virt_addr = pci_alloc_consistent(mhba->pdev, size,
+ &res->bus_addr);
+ if (!res->virt_addr) {
+ dev_err(&mhba->pdev->dev,
+ "unable to allocate consistent mem,"
+ "size = %d.\n", size);
+ kfree(res);
+ return NULL;
+ }
+ memset(res->virt_addr, 0, size);
+ break;
+
+ default:
+ dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
+ kfree(res);
+ return NULL;
+ }
+
+ res->type = type;
+ res->size = size;
+ INIT_LIST_HEAD(&res->entry);
+ list_add_tail(&res->entry, &mhba->res_list);
+
+ return res;
+}
+
+static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
+{
+ struct mvumi_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
+ switch (res->type) {
+ case RESOURCE_UNCACHED_MEMORY:
+ pci_free_consistent(mhba->pdev, res->size,
+ res->virt_addr, res->bus_addr);
+ break;
+ case RESOURCE_CACHED_MEMORY:
+ kfree(res->virt_addr);
+ break;
+ default:
+ dev_err(&mhba->pdev->dev,
+ "unknown resource type %d\n", res->type);
+ break;
+ }
+ list_del(&res->entry);
+ kfree(res);
+ }
+ mhba->fw_flag &= ~MVUMI_FW_ALLOC;
+}
+
+/**
+ * mvumi_make_sgl - Prepares SGL
+ * @mhba: Adapter soft state
+ * @scmd: SCSI command from the mid-layer
+ * @sgl_p: SGL to be filled in
+ * @sg_count: returns the number of SG elements
+ *
+ * If successful, this function returns 0; otherwise, it returns -1.
+ */
+static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
+ void *sgl_p, unsigned char *sg_count)
+{
+ struct scatterlist *sg;
+ struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
+ unsigned int i;
+ unsigned int sgnum = scsi_sg_count(scmd);
+ dma_addr_t busaddr;
+
+ if (sgnum) {
+ sg = scsi_sglist(scmd);
+ *sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
+ (int) scmd->sc_data_direction);
+ if (*sg_count > mhba->max_sge) {
+ dev_err(&mhba->pdev->dev, "sg count[0x%x] is bigger "
+ "than max sg[0x%x].\n",
+ *sg_count, mhba->max_sge);
+ return -1;
+ }
+ for (i = 0; i < *sg_count; i++) {
+ busaddr = sg_dma_address(&sg[i]);
+ m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
+ m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
+ m_sg->flags = 0;
+ m_sg->size = cpu_to_le32(sg_dma_len(&sg[i]));
+ if ((i + 1) == *sg_count)
+ m_sg->flags |= SGD_EOT;
+
+ m_sg++;
+ }
+ } else {
+ scmd->SCp.dma_handle = scsi_bufflen(scmd) ?
+ pci_map_single(mhba->pdev, scsi_sglist(scmd),
+ scsi_bufflen(scmd),
+ (int) scmd->sc_data_direction)
+ : 0;
+ busaddr = scmd->SCp.dma_handle;
+ m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
+ m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
+ m_sg->flags = SGD_EOT;
+ m_sg->size = cpu_to_le32(scsi_bufflen(scmd));
+ *sg_count = 1;
+ }
+
+ return 0;
+}
+
+static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
+ unsigned int size)
+{
+ struct mvumi_sgl *m_sg;
+ void *virt_addr;
+ dma_addr_t phy_addr;
+
+ if (size == 0)
+ return 0;
+
+ virt_addr = pci_alloc_consistent(mhba->pdev, size, &phy_addr);
+ if (!virt_addr)
+ return -1;
+
+ memset(virt_addr, 0, size);
+
+ m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
+ cmd->frame->sg_counts = 1;
+ cmd->data_buf = virt_addr;
+
+ m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
+ m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
+ m_sg->flags = SGD_EOT;
+ m_sg->size = cpu_to_le32(size);
+
+ return 0;
+}
+
+static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
+ unsigned int buf_size)
+{
+ struct mvumi_cmd *cmd;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd) {
+ dev_err(&mhba->pdev->dev, "failed to create a internal cmd\n");
+ return NULL;
+ }
+ INIT_LIST_HEAD(&cmd->queue_pointer);
+
+ cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
+ if (!cmd->frame) {
+ dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
+ " frame,size = %d.\n", mhba->ib_max_size);
+ kfree(cmd);
+ return NULL;
+ }
+
+ if (buf_size) {
+ if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
+ dev_err(&mhba->pdev->dev, "failed to allocate memory"
+ " for internal frame\n");
+ kfree(cmd->frame);
+ kfree(cmd);
+ return NULL;
+ }
+ } else
+ cmd->frame->sg_counts = 0;
+
+ return cmd;
+}
+
+static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
+ struct mvumi_cmd *cmd)
+{
+ struct mvumi_sgl *m_sg;
+ unsigned int size;
+ dma_addr_t phy_addr;
+
+ if (cmd && cmd->frame) {
+ if (cmd->frame->sg_counts) {
+ m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
+ size = m_sg->size;
+
+ phy_addr = (dma_addr_t) m_sg->baseaddr_l |
+ (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
+
+ pci_free_consistent(mhba->pdev, size, cmd->data_buf,
+ phy_addr);
+ }
+ kfree(cmd->frame);
+ kfree(cmd);
+ }
+}
+
+/**
+ * mvumi_get_cmd - Get a command from the free pool
+ * @mhba: Adapter soft state
+ *
+ * Returns a free command from the pool
+ */
+static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
+{
+ struct mvumi_cmd *cmd = NULL;
+
+ if (likely(!list_empty(&mhba->cmd_pool))) {
+ cmd = list_entry((&mhba->cmd_pool)->next,
+ struct mvumi_cmd, queue_pointer);
+ list_del_init(&cmd->queue_pointer);
+ } else
+ dev_warn(&mhba->pdev->dev, "command pool is empty!\n");
+
+ return cmd;
+}
+
+/**
+ * mvumi_return_cmd - Return a cmd to free command pool
+ * @mhba: Adapter soft state
+ * @cmd: Command packet to be returned to free command pool
+ */
+static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
+ struct mvumi_cmd *cmd)
+{
+ cmd->scmd = NULL;
+ list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
+}
+
+/**
+ * mvumi_free_cmds - Free all the cmds in the free cmd pool
+ * @mhba: Adapter soft state
+ */
+static void mvumi_free_cmds(struct mvumi_hba *mhba)
+{
+ struct mvumi_cmd *cmd;
+
+ while (!list_empty(&mhba->cmd_pool)) {
+ cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
+ queue_pointer);
+ list_del(&cmd->queue_pointer);
+ kfree(cmd->frame);
+ kfree(cmd);
+ }
+}
+
+/**
+ * mvumi_alloc_cmds - Allocates the command packets
+ * @mhba: Adapter soft state
+ *
+ */
+static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
+{
+ int i;
+ struct mvumi_cmd *cmd;
+
+ for (i = 0; i < mhba->max_io; i++) {
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ goto err_exit;
+
+ INIT_LIST_HEAD(&cmd->queue_pointer);
+ list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
+ cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
+ if (!cmd->frame)
+ goto err_exit;
+ }
+ return 0;
+
+err_exit:
+ dev_err(&mhba->pdev->dev,
+ "failed to allocate memory for cmd[0x%x].\n", i);
+ while (!list_empty(&mhba->cmd_pool)) {
+ cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
+ queue_pointer);
+ list_del(&cmd->queue_pointer);
+ kfree(cmd->frame);
+ kfree(cmd);
+ }
+ return -ENOMEM;
+}
+
+static int mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
+{
+ unsigned int ib_rp_reg, cur_ib_entry;
+
+ if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
+ dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
+ return -1;
+ }
+ ib_rp_reg = ioread32(mhba->mmio + CLA_INB_READ_POINTER);
+
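+ /*
+ * The inbound list is full when the read pointer reports the same slot
+ * number as the current host slot but the opposite toggle bit.
+ */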
+ if (unlikely(((ib_rp_reg & CL_SLOT_NUM_MASK) ==
+ (mhba->ib_cur_slot & CL_SLOT_NUM_MASK)) &&
+ ((ib_rp_reg & CL_POINTER_TOGGLE) !=
+ (mhba->ib_cur_slot & CL_POINTER_TOGGLE)))) {
+ dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
+ return -1;
+ }
+
+ cur_ib_entry = mhba->ib_cur_slot & CL_SLOT_NUM_MASK;
+ cur_ib_entry++;
+ if (cur_ib_entry >= mhba->list_num_io) {
+ cur_ib_entry -= mhba->list_num_io;
+ mhba->ib_cur_slot ^= CL_POINTER_TOGGLE;
+ }
+ mhba->ib_cur_slot &= ~CL_SLOT_NUM_MASK;
+ mhba->ib_cur_slot |= (cur_ib_entry & CL_SLOT_NUM_MASK);
+ *ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
+ atomic_inc(&mhba->fw_outstanding);
+
+ return 0;
+}
+
+static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
+{
+ iowrite32(0xfff, mhba->ib_shadow);
+ iowrite32(mhba->ib_cur_slot, mhba->mmio + CLA_INB_WRITE_POINTER);
+}
+
+static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
+ unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
+{
+ unsigned short tag, request_id;
+
+ udelay(1);
+ p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
+ request_id = p_outb_frame->request_id;
+ tag = p_outb_frame->tag;
+ if (tag > mhba->tag_pool.size) {
+ dev_err(&mhba->pdev->dev, "ob frame data error\n");
+ return -1;
+ }
+ if (mhba->tag_cmd[tag] == NULL) {
+ dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
+ return -1;
+ } else if (mhba->tag_cmd[tag]->request_id != request_id &&
+ mhba->request_id_enabled) {
+ dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
+ "cmd request ID:0x%x\n", request_id,
+ mhba->tag_cmd[tag]->request_id);
+ return -1;
+ }
+
+ return 0;
+}
+
+static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
+{
+ unsigned int ob_write_reg, ob_write_shadow_reg;
+ unsigned int cur_obf, assign_obf_end, i;
+ struct mvumi_ob_data *ob_data;
+ struct mvumi_rsp_frame *p_outb_frame;
+
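+ /*
+ * Wait until the outbound copy pointer register and its memory shadow
+ * agree, so that every entry up to the copy pointer holds valid data.
+ */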
+ do {
+ ob_write_reg = ioread32(mhba->mmio + CLA_OUTB_COPY_POINTER);
+ ob_write_shadow_reg = ioread32(mhba->ob_shadow);
+ } while ((ob_write_reg & CL_SLOT_NUM_MASK) != ob_write_shadow_reg);
+
+ cur_obf = mhba->ob_cur_slot & CL_SLOT_NUM_MASK;
+ assign_obf_end = ob_write_reg & CL_SLOT_NUM_MASK;
+
+ if ((ob_write_reg & CL_POINTER_TOGGLE) !=
+ (mhba->ob_cur_slot & CL_POINTER_TOGGLE)) {
+ assign_obf_end += mhba->list_num_io;
+ }
+
+ for (i = (assign_obf_end - cur_obf); i != 0; i--) {
+ cur_obf++;
+ if (cur_obf >= mhba->list_num_io) {
+ cur_obf -= mhba->list_num_io;
+ mhba->ob_cur_slot ^= CL_POINTER_TOGGLE;
+ }
+
+ p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
+
+ /* Copy pointer may point to entry in outbound list
+ * before entry has valid data
+ */
+ if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
+ mhba->tag_cmd[p_outb_frame->tag] == NULL ||
+ p_outb_frame->request_id !=
+ mhba->tag_cmd[p_outb_frame->tag]->request_id))
+ if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
+ continue;
+
+ if (!list_empty(&mhba->ob_data_list)) {
+ ob_data = (struct mvumi_ob_data *)
+ list_first_entry(&mhba->ob_data_list,
+ struct mvumi_ob_data, list);
+ list_del_init(&ob_data->list);
+ } else {
+ ob_data = NULL;
+ if (cur_obf == 0) {
+ cur_obf = mhba->list_num_io - 1;
+ mhba->ob_cur_slot ^= CL_POINTER_TOGGLE;
+ } else
+ cur_obf -= 1;
+ break;
+ }
+
+ memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
+ p_outb_frame->tag = 0xff;
+
+ list_add_tail(&ob_data->list, &mhba->free_ob_list);
+ }
+ mhba->ob_cur_slot &= ~CL_SLOT_NUM_MASK;
+ mhba->ob_cur_slot |= (cur_obf & CL_SLOT_NUM_MASK);
+ iowrite32(mhba->ob_cur_slot, mhba->mmio + CLA_OUTB_READ_POINTER);
+}
+
+static void mvumi_reset(void *regs)
+{
+ iowrite32(0, regs + CPU_ENPOINTA_MASK_REG);
+ if (ioread32(regs + CPU_ARM_TO_PCIEA_MSG1) != HANDSHAKE_DONESTATE)
+ return;
+
+ iowrite32(DRBL_SOFT_RESET, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
+}
+
+static unsigned char mvumi_start(struct mvumi_hba *mhba);
+
+static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
+{
+ mhba->fw_state = FW_STATE_ABORT;
+ mvumi_reset(mhba->mmio);
+
+ if (mvumi_start(mhba))
+ return FAILED;
+ else
+ return SUCCESS;
+}
+
+static int mvumi_host_reset(struct scsi_cmnd *scmd)
+{
+ struct mvumi_hba *mhba;
+
+ mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
+
+ scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
+ scmd->serial_number, scmd->cmnd[0], scmd->retries);
+
+ return mvumi_wait_for_outstanding(mhba);
+}
+
+static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
+ struct mvumi_cmd *cmd)
+{
+ unsigned long flags;
+
+ cmd->cmd_status = REQ_STATUS_PENDING;
+
+ if (atomic_read(&cmd->sync_cmd)) {
+ dev_err(&mhba->pdev->dev,
+ "last blocked cmd not finished, sync_cmd = %d\n",
+ atomic_read(&cmd->sync_cmd));
+ BUG_ON(1);
+ return -1;
+ }
+ atomic_inc(&cmd->sync_cmd);
+ spin_lock_irqsave(mhba->shost->host_lock, flags);
+ mhba->instancet->fire_cmd(mhba, cmd);
+ spin_unlock_irqrestore(mhba->shost->host_lock, flags);
+
+ wait_event_timeout(mhba->int_cmd_wait_q,
+ (cmd->cmd_status != REQ_STATUS_PENDING),
+ MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);
+
+ /* command timeout */
+ if (atomic_read(&cmd->sync_cmd)) {
+ spin_lock_irqsave(mhba->shost->host_lock, flags);
+ atomic_dec(&cmd->sync_cmd);
+ if (mhba->tag_cmd[cmd->frame->tag]) {
+ mhba->tag_cmd[cmd->frame->tag] = 0;
+ dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
+ cmd->frame->tag);
+ tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
+ }
+ if (!list_empty(&cmd->queue_pointer)) {
+ dev_warn(&mhba->pdev->dev,
+ "TIMEOUT:A internal command doesn't send!\n");
+ list_del_init(&cmd->queue_pointer);
+ } else
+ atomic_dec(&mhba->fw_outstanding);
+
+ spin_unlock_irqrestore(mhba->shost->host_lock, flags);
+ }
+ return 0;
+}
+
+static void mvumi_release_fw(struct mvumi_hba *mhba)
+{
+ mvumi_free_cmds(mhba);
+ mvumi_release_mem_resource(mhba);
+ mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
+ kfree(mhba->handshake_page);
+ pci_release_regions(mhba->pdev);
+}
+
+static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
+{
+ struct mvumi_cmd *cmd;
+ struct mvumi_msg_frame *frame;
+ unsigned char device_id, retry = 0;
+ unsigned char bitcount = sizeof(unsigned char) * 8;
+
+ for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
+ if (!(mhba->target_map[device_id / bitcount] &
+ (1 << (device_id % bitcount))))
+ continue;
+get_cmd:
+ cmd = mvumi_create_internal_cmd(mhba, 0);
+ if (!cmd) {
+ if (retry++ >= 5) {
+ dev_err(&mhba->pdev->dev, "failed to get memory"
+ " for internal flush cache cmd for "
+ "device %d", device_id);
+ retry = 0;
+ continue;
+ } else
+ goto get_cmd;
+ }
+ cmd->scmd = NULL;
+ cmd->cmd_status = REQ_STATUS_PENDING;
+ atomic_set(&cmd->sync_cmd, 0);
+ frame = cmd->frame;
+ frame->req_function = CL_FUN_SCSI_CMD;
+ frame->device_id = device_id;
+ frame->cmd_flag = CMD_FLAG_NON_DATA;
+ frame->data_transfer_length = 0;
+ frame->cdb_length = MAX_COMMAND_SIZE;
+ memset(frame->cdb, 0, MAX_COMMAND_SIZE);
+ frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
+ frame->cdb[2] = CDB_CORE_SHUTDOWN;
+
+ mvumi_issue_blocked_cmd(mhba, cmd);
+ if (cmd->cmd_status != SAM_STAT_GOOD) {
+ dev_err(&mhba->pdev->dev,
+ "device %d flush cache failed, status=0x%x.\n",
+ device_id, cmd->cmd_status);
+ }
+
+ mvumi_delete_internal_cmd(mhba, cmd);
+ }
+ return 0;
+}
+
+static unsigned char
+mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
+ unsigned short len)
+{
+ unsigned char *ptr;
+ unsigned char ret = 0, i;
+
+ ptr = (unsigned char *) p_header->frame_content;
+ for (i = 0; i < len; i++) {
+ ret ^= *ptr;
+ ptr++;
+ }
+
+ return ret;
+}
+
+void mvumi_hs_build_page(struct mvumi_hba *mhba,
+ struct mvumi_hs_header *hs_header)
+{
+ struct mvumi_hs_page2 *hs_page2;
+ struct mvumi_hs_page4 *hs_page4;
+ struct mvumi_hs_page3 *hs_page3;
+ struct timeval time;
+ unsigned int local_time;
+
+ switch (hs_header->page_code) {
+ case HS_PAGE_HOST_INFO:
+ hs_page2 = (struct mvumi_hs_page2 *) hs_header;
+ hs_header->frame_length = sizeof(*hs_page2) - 4;
+ memset(hs_header->frame_content, 0, hs_header->frame_length);
+ hs_page2->host_type = 3; /* 3 means Linux */
+ hs_page2->host_ver.ver_major = VER_MAJOR;
+ hs_page2->host_ver.ver_minor = VER_MINOR;
+ hs_page2->host_ver.ver_oem = VER_OEM;
+ hs_page2->host_ver.ver_build = VER_BUILD;
+ hs_page2->system_io_bus = 0;
+ hs_page2->slot_number = 0;
+ hs_page2->intr_level = 0;
+ hs_page2->intr_vector = 0;
+ do_gettimeofday(&time);
+ local_time = (unsigned int) (time.tv_sec -
+ (sys_tz.tz_minuteswest * 60));
+ hs_page2->seconds_since1970 = local_time;
+ hs_header->checksum = mvumi_calculate_checksum(hs_header,
+ hs_header->frame_length);
+ break;
+
+ case HS_PAGE_FIRM_CTL:
+ hs_page3 = (struct mvumi_hs_page3 *) hs_header;
+ hs_header->frame_length = sizeof(*hs_page3) - 4;
+ memset(hs_header->frame_content, 0, hs_header->frame_length);
+ hs_header->checksum = mvumi_calculate_checksum(hs_header,
+ hs_header->frame_length);
+ break;
+
+ case HS_PAGE_CL_INFO:
+ hs_page4 = (struct mvumi_hs_page4 *) hs_header;
+ hs_header->frame_length = sizeof(*hs_page4) - 4;
+ memset(hs_header->frame_content, 0, hs_header->frame_length);
+ hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
+ hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);
+
+ hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
+ hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
+ hs_page4->ib_entry_size = mhba->ib_max_size_setting;
+ hs_page4->ob_entry_size = mhba->ob_max_size_setting;
+ hs_page4->ob_depth = mhba->list_num_io;
+ hs_page4->ib_depth = mhba->list_num_io;
+ hs_header->checksum = mvumi_calculate_checksum(hs_header,
+ hs_header->frame_length);
+ break;
+
+ default:
+ dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
+ hs_header->page_code);
+ break;
+ }
+}
+
+/**
+ * mvumi_init_data - Initialize requested data for FW
+ * @mhba: Adapter soft state
+ */
+static int mvumi_init_data(struct mvumi_hba *mhba)
+{
+ struct mvumi_ob_data *ob_pool;
+ struct mvumi_res *res_mgnt;
+ unsigned int tmp_size, offset, i;
+ void *virmem, *v;
+ dma_addr_t p;
+
+ if (mhba->fw_flag & MVUMI_FW_ALLOC)
+ return 0;
+
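+ /*
+ * A single uncached DMA buffer is carved into the inbound list, the
+ * inbound/outbound shadow pointers and the outbound list, with padding
+ * for alignment.
+ */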
+ tmp_size = mhba->ib_max_size * mhba->max_io;
+ tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
+ tmp_size += 8 + sizeof(u32) + 16;
+
+ res_mgnt = mvumi_alloc_mem_resource(mhba,
+ RESOURCE_UNCACHED_MEMORY, tmp_size);
+ if (!res_mgnt) {
+ dev_err(&mhba->pdev->dev,
+ "failed to allocate memory for inbound list\n");
+ goto fail_alloc_dma_buf;
+ }
+
+ p = res_mgnt->bus_addr;
+ v = res_mgnt->virt_addr;
+ /* ib_list */
+ offset = round_up(p, 128) - p;
+ p += offset;
+ v += offset;
+ mhba->ib_list = v;
+ mhba->ib_list_phys = p;
+ v += mhba->ib_max_size * mhba->max_io;
+ p += mhba->ib_max_size * mhba->max_io;
+ /* ib shadow */
+ offset = round_up(p, 8) - p;
+ p += offset;
+ v += offset;
+ mhba->ib_shadow = v;
+ mhba->ib_shadow_phys = p;
+ p += sizeof(u32);
+ v += sizeof(u32);
+ /* ob shadow */
+ offset = round_up(p, 8) - p;
+ p += offset;
+ v += offset;
+ mhba->ob_shadow = v;
+ mhba->ob_shadow_phys = p;
+ p += 8;
+ v += 8;
+
+ /* ob list */
+ offset = round_up(p, 128) - p;
+ p += offset;
+ v += offset;
+
+ mhba->ob_list = v;
+ mhba->ob_list_phys = p;
+
+ /* ob data pool */
+ tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
+ tmp_size = round_up(tmp_size, 8);
+
+ res_mgnt = mvumi_alloc_mem_resource(mhba,
+ RESOURCE_CACHED_MEMORY, tmp_size);
+ if (!res_mgnt) {
+ dev_err(&mhba->pdev->dev,
+ "failed to allocate memory for outbound data buffer\n");
+ goto fail_alloc_dma_buf;
+ }
+ virmem = res_mgnt->virt_addr;
+
+ for (i = mhba->max_io; i != 0; i--) {
+ ob_pool = (struct mvumi_ob_data *) virmem;
+ list_add_tail(&ob_pool->list, &mhba->ob_data_list);
+ virmem += mhba->ob_max_size + sizeof(*ob_pool);
+ }
+
+ tmp_size = sizeof(unsigned short) * mhba->max_io +
+ sizeof(struct mvumi_cmd *) * mhba->max_io;
+ tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
+ (sizeof(unsigned char) * 8);
+
+ res_mgnt = mvumi_alloc_mem_resource(mhba,
+ RESOURCE_CACHED_MEMORY, tmp_size);
+ if (!res_mgnt) {
+ dev_err(&mhba->pdev->dev,
+ "failed to allocate memory for tag and target map\n");
+ goto fail_alloc_dma_buf;
+ }
+
+ virmem = res_mgnt->virt_addr;
+ mhba->tag_pool.stack = virmem;
+ mhba->tag_pool.size = mhba->max_io;
+ tag_init(&mhba->tag_pool, mhba->max_io);
+ virmem += sizeof(unsigned short) * mhba->max_io;
+
+ mhba->tag_cmd = virmem;
+ virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;
+
+ mhba->target_map = virmem;
+
+ mhba->fw_flag |= MVUMI_FW_ALLOC;
+ return 0;
+
+fail_alloc_dma_buf:
+ mvumi_release_mem_resource(mhba);
+ return -1;
+}
+
+static int mvumi_hs_process_page(struct mvumi_hba *mhba,
+ struct mvumi_hs_header *hs_header)
+{
+ struct mvumi_hs_page1 *hs_page1;
+ unsigned char page_checksum;
+
+ page_checksum = mvumi_calculate_checksum(hs_header,
+ hs_header->frame_length);
+ if (page_checksum != hs_header->checksum) {
+ dev_err(&mhba->pdev->dev, "checksum error\n");
+ return -1;
+ }
+
+ switch (hs_header->page_code) {
+ case HS_PAGE_FIRM_CAP:
+ hs_page1 = (struct mvumi_hs_page1 *) hs_header;
+
+ mhba->max_io = hs_page1->max_io_support;
+ mhba->list_num_io = hs_page1->cl_inout_list_depth;
+ mhba->max_transfer_size = hs_page1->max_transfer_size;
+ mhba->max_target_id = hs_page1->max_devices_support;
+ mhba->hba_capability = hs_page1->capability;
+ mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
+ mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;
+
+ mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
+ mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;
+
+ dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
+ hs_page1->fw_ver.ver_build);
+
+ break;
+ default:
+ dev_err(&mhba->pdev->dev, "handshake: page code error\n");
+ return -1;
+ }
+ return 0;
+}
+
+/**
+ * mvumi_handshake - Move the FW to READY state
+ * @mhba: Adapter soft state
+ *
+ * During initialization, the FW can be in any one of several possible
+ * states. If the FW is in the operational or waiting-for-handshake state,
+ * the driver must take steps to bring it to the ready state. Otherwise,
+ * the driver has to wait for the FW to reach the ready state.
+ */
+static int mvumi_handshake(struct mvumi_hba *mhba)
+{
+ unsigned int hs_state, tmp, hs_fun;
+ struct mvumi_hs_header *hs_header;
+ void *regs = mhba->mmio;
+
+ if (mhba->fw_state == FW_STATE_STARTING)
+ hs_state = HS_S_START;
+ else {
+ tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG0);
+ hs_state = HS_GET_STATE(tmp);
+ dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
+ if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
+ mhba->fw_state = FW_STATE_STARTING;
+ return -1;
+ }
+ }
+
+ hs_fun = 0;
+ switch (hs_state) {
+ case HS_S_START:
+ mhba->fw_state = FW_STATE_HANDSHAKING;
+ HS_SET_STATUS(hs_fun, HS_STATUS_OK);
+ HS_SET_STATE(hs_fun, HS_S_RESET);
+ iowrite32(HANDSHAKE_SIGNATURE, regs + CPU_PCIEA_TO_ARM_MSG1);
+ iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
+ iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
+ break;
+
+ case HS_S_RESET:
+ iowrite32(lower_32_bits(mhba->handshake_page_phys),
+ regs + CPU_PCIEA_TO_ARM_MSG1);
+ iowrite32(upper_32_bits(mhba->handshake_page_phys),
+ regs + CPU_ARM_TO_PCIEA_MSG1);
+ HS_SET_STATUS(hs_fun, HS_STATUS_OK);
+ HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
+ iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
+ iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
+
+ break;
+
+ case HS_S_PAGE_ADDR:
+ case HS_S_QUERY_PAGE:
+ case HS_S_SEND_PAGE:
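+ /*
+ * These three states walk the handshake pages: a page returned by
+ * the FW is processed in the query state, then the next page is
+ * built and sent until all pages have been exchanged.
+ */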
+ hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
+ if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
+ mhba->hba_total_pages =
+ ((struct mvumi_hs_page1 *) hs_header)->total_pages;
+
+ if (mhba->hba_total_pages == 0)
+ mhba->hba_total_pages = HS_PAGE_TOTAL-1;
+ }
+
+ if (hs_state == HS_S_QUERY_PAGE) {
+ if (mvumi_hs_process_page(mhba, hs_header)) {
+ HS_SET_STATE(hs_fun, HS_S_ABORT);
+ return -1;
+ }
+ if (mvumi_init_data(mhba)) {
+ HS_SET_STATE(hs_fun, HS_S_ABORT);
+ return -1;
+ }
+ } else if (hs_state == HS_S_PAGE_ADDR) {
+ hs_header->page_code = 0;
+ mhba->hba_total_pages = HS_PAGE_TOTAL-1;
+ }
+
+ if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
+ hs_header->page_code++;
+ if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
+ mvumi_hs_build_page(mhba, hs_header);
+ HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
+ } else
+ HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
+ } else
+ HS_SET_STATE(hs_fun, HS_S_END);
+
+ HS_SET_STATUS(hs_fun, HS_STATUS_OK);
+ iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
+ iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
+ break;
+
+ case HS_S_END:
+ /* Set communication list ISR */
+ tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG);
+ tmp |= INT_MAP_COMAOUT | INT_MAP_COMAERR;
+ iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG);
+ iowrite32(mhba->list_num_io, mhba->ib_shadow);
+ /* Set InBound List Available count shadow */
+ iowrite32(lower_32_bits(mhba->ib_shadow_phys),
+ regs + CLA_INB_AVAL_COUNT_BASEL);
+ iowrite32(upper_32_bits(mhba->ib_shadow_phys),
+ regs + CLA_INB_AVAL_COUNT_BASEH);
+
+ /* Set OutBound List Available count shadow */
+ iowrite32((mhba->list_num_io-1) | CL_POINTER_TOGGLE,
+ mhba->ob_shadow);
+ iowrite32(lower_32_bits(mhba->ob_shadow_phys), regs + 0x5B0);
+ iowrite32(upper_32_bits(mhba->ob_shadow_phys), regs + 0x5B4);
+
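+ /*
+ * Start both current slots at the last entry with the toggle bit set,
+ * so the first command wraps to slot 0 and flips the toggle.
+ */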
+ mhba->ib_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE;
+ mhba->ob_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE;
+ mhba->fw_state = FW_STATE_STARTED;
+
+ break;
+ default:
+ dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
+ hs_state);
+ return -1;
+ }
+ return 0;
+}
+
+static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
+{
+ unsigned int isr_status;
+ unsigned long before;
+
+ before = jiffies;
+ mvumi_handshake(mhba);
+ do {
+ isr_status = mhba->instancet->read_fw_status_reg(mhba->mmio);
+
+ if (mhba->fw_state == FW_STATE_STARTED)
+ return 0;
+ if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
+ dev_err(&mhba->pdev->dev,
+ "no handshake response at state 0x%x.\n",
+ mhba->fw_state);
+ dev_err(&mhba->pdev->dev,
+ "isr : global=0x%x,status=0x%x.\n",
+ mhba->global_isr, isr_status);
+ return -1;
+ }
+ rmb();
+ usleep_range(1000, 2000);
+ } while (!(isr_status & DRBL_HANDSHAKE_ISR));
+
+ return 0;
+}
+
+static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
+{
+ void *regs = mhba->mmio;
+ unsigned int tmp;
+ unsigned long before;
+
+ before = jiffies;
+ tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1);
+ while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
+ if (tmp != HANDSHAKE_READYSTATE)
+ iowrite32(DRBL_MU_RESET,
+ regs + CPU_PCIEA_TO_ARM_DRBL_REG);
+ if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
+ dev_err(&mhba->pdev->dev,
+ "invalid signature [0x%x].\n", tmp);
+ return -1;
+ }
+ usleep_range(1000, 2000);
+ rmb();
+ tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1);
+ }
+
+ mhba->fw_state = FW_STATE_STARTING;
+ dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
+ do {
+ if (mvumi_handshake_event(mhba)) {
+ dev_err(&mhba->pdev->dev,
+ "handshake failed at state 0x%x.\n",
+ mhba->fw_state);
+ return -1;
+ }
+ } while (mhba->fw_state != FW_STATE_STARTED);
+
+ dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");
+
+ return 0;
+}
+
+static unsigned char mvumi_start(struct mvumi_hba *mhba)
+{
+ void *regs = mhba->mmio;
+ unsigned int tmp;
+ /* clear doorbell */
+ tmp = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG);
+ iowrite32(tmp, regs + CPU_ARM_TO_PCIEA_DRBL_REG);
+
+ iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG);
+ tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG) | INT_MAP_DL_CPU2PCIEA;
+ iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG);
+ if (mvumi_check_handshake(mhba))
+ return -1;
+
+ return 0;
+}
+
+/**
+ * mvumi_complete_cmd - Completes a command
+ * @mhba: Adapter soft state
+ * @cmd: Command to be completed
+ * @ob_frame: Outbound response frame for the command
+ */
+static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
+ struct mvumi_rsp_frame *ob_frame)
+{
+ struct scsi_cmnd *scmd = cmd->scmd;
+
+ cmd->scmd->SCp.ptr = NULL;
+ scmd->result = ob_frame->req_status;
+
+ switch (ob_frame->req_status) {
+ case SAM_STAT_GOOD:
+ scmd->result |= DID_OK << 16;
+ break;
+ case SAM_STAT_BUSY:
+ scmd->result |= DID_BUS_BUSY << 16;
+ break;
+ case SAM_STAT_CHECK_CONDITION:
+ scmd->result |= (DID_OK << 16);
+ if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
+ memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
+ sizeof(struct mvumi_sense_data));
+ scmd->result |= (DRIVER_SENSE << 24);
+ }
+ break;
+ default:
+ scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
+ break;
+ }
+
+ if (scsi_bufflen(scmd)) {
+ if (scsi_sg_count(scmd)) {
+ pci_unmap_sg(mhba->pdev,
+ scsi_sglist(scmd),
+ scsi_sg_count(scmd),
+ (int) scmd->sc_data_direction);
+ } else {
+ pci_unmap_single(mhba->pdev,
+ scmd->SCp.dma_handle,
+ scsi_bufflen(scmd),
+ (int) scmd->sc_data_direction);
+
+ scmd->SCp.dma_handle = 0;
+ }
+ }
+ cmd->scmd->scsi_done(scmd);
+ mvumi_return_cmd(mhba, cmd);
+}
+
+static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
+ struct mvumi_cmd *cmd,
+ struct mvumi_rsp_frame *ob_frame)
+{
+ if (atomic_read(&cmd->sync_cmd)) {
+ cmd->cmd_status = ob_frame->req_status;
+
+ if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
+ (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
+ cmd->data_buf) {
+ memcpy(cmd->data_buf, ob_frame->payload,
+ sizeof(struct mvumi_sense_data));
+ }
+ atomic_dec(&cmd->sync_cmd);
+ wake_up(&mhba->int_cmd_wait_q);
+ }
+}
+
+static void mvumi_show_event(struct mvumi_hba *mhba,
+ struct mvumi_driver_event *ptr)
+{
+ unsigned int i;
+
+ dev_warn(&mhba->pdev->dev,
+ "Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
+ ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
+ if (ptr->param_count) {
+ printk(KERN_WARNING "Event param(len 0x%x): ",
+ ptr->param_count);
+ for (i = 0; i < ptr->param_count; i++)
+ printk(KERN_WARNING "0x%x ", ptr->params[i]);
+
+ printk(KERN_WARNING "\n");
+ }
+
+ if (ptr->sense_data_length) {
+ printk(KERN_WARNING "Event sense data(len 0x%x): ",
+ ptr->sense_data_length);
+ for (i = 0; i < ptr->sense_data_length; i++)
+ printk(KERN_WARNING "0x%x ", ptr->sense_data[i]);
+ printk(KERN_WARNING "\n");
+ }
+}
+
+static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
+{
+ if (msg == APICDB1_EVENT_GETEVENT) {
+ int i, count;
+ struct mvumi_driver_event *param = NULL;
+ struct mvumi_event_req *er = buffer;
+ count = er->count;
+ if (count > MAX_EVENTS_RETURNED) {
+ dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger"
+ " than max event count[0x%x].\n",
+ count, MAX_EVENTS_RETURNED);
+ return;
+ }
+ for (i = 0; i < count; i++) {
+ param = &er->events[i];
+ mvumi_show_event(mhba, param);
+ }
+ }
+}
+
+static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
+{
+ struct mvumi_cmd *cmd;
+ struct mvumi_msg_frame *frame;
+
+ cmd = mvumi_create_internal_cmd(mhba, 512);
+ if (!cmd)
+ return -1;
+ cmd->scmd = NULL;
+ cmd->cmd_status = REQ_STATUS_PENDING;
+ atomic_set(&cmd->sync_cmd, 0);
+ frame = cmd->frame;
+ frame->device_id = 0;
+ frame->cmd_flag = CMD_FLAG_DATA_IN;
+ frame->req_function = CL_FUN_SCSI_CMD;
+ frame->cdb_length = MAX_COMMAND_SIZE;
+ frame->data_transfer_length = sizeof(struct mvumi_event_req);
+ memset(frame->cdb, 0, MAX_COMMAND_SIZE);
+ frame->cdb[0] = APICDB0_EVENT;
+ frame->cdb[1] = msg;
+ mvumi_issue_blocked_cmd(mhba, cmd);
+
+ if (cmd->cmd_status != SAM_STAT_GOOD)
+ dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
+ cmd->cmd_status);
+ else
+ mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);
+
+ mvumi_delete_internal_cmd(mhba, cmd);
+ return 0;
+}
+
+static void mvumi_scan_events(struct work_struct *work)
+{
+ struct mvumi_events_wq *mu_ev =
+ container_of(work, struct mvumi_events_wq, work_q);
+
+ mvumi_get_event(mu_ev->mhba, mu_ev->event);
+ kfree(mu_ev);
+}
+
+static void mvumi_launch_events(struct mvumi_hba *mhba, u8 msg)
+{
+ struct mvumi_events_wq *mu_ev;
+
+ mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
+ if (mu_ev) {
+ INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
+ mu_ev->mhba = mhba;
+ mu_ev->event = msg;
+ mu_ev->param = NULL;
+ schedule_work(&mu_ev->work_q);
+ }
+}
+
+static void mvumi_handle_clob(struct mvumi_hba *mhba)
+{
+ struct mvumi_rsp_frame *ob_frame;
+ struct mvumi_cmd *cmd;
+ struct mvumi_ob_data *pool;
+
+ while (!list_empty(&mhba->free_ob_list)) {
+ pool = list_first_entry(&mhba->free_ob_list,
+ struct mvumi_ob_data, list);
+ list_del_init(&pool->list);
+ list_add_tail(&pool->list, &mhba->ob_data_list);
+
+ ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
+ cmd = mhba->tag_cmd[ob_frame->tag];
+
+ atomic_dec(&mhba->fw_outstanding);
+ mhba->tag_cmd[ob_frame->tag] = 0;
+ tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
+ if (cmd->scmd)
+ mvumi_complete_cmd(mhba, cmd, ob_frame);
+ else
+ mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
+ }
+ mhba->instancet->fire_cmd(mhba, NULL);
+}
+
+static irqreturn_t mvumi_isr_handler(int irq, void *devp)
+{
+ struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
+ unsigned long flags;
+
+ spin_lock_irqsave(mhba->shost->host_lock, flags);
+ if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
+ spin_unlock_irqrestore(mhba->shost->host_lock, flags);
+ return IRQ_NONE;
+ }
+
+ if (mhba->global_isr & INT_MAP_DL_CPU2PCIEA) {
+ if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
+ dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
+ mvumi_handshake(mhba);
+ }
+ if (mhba->isr_status & DRBL_EVENT_NOTIFY)
+ mvumi_launch_events(mhba, APICDB1_EVENT_GETEVENT);
+ }
+
+ if (mhba->global_isr & INT_MAP_COMAOUT)
+ mvumi_receive_ob_list_entry(mhba);
+
+ mhba->global_isr = 0;
+ mhba->isr_status = 0;
+ if (mhba->fw_state == FW_STATE_STARTED)
+ mvumi_handle_clob(mhba);
+ spin_unlock_irqrestore(mhba->shost->host_lock, flags);
+ return IRQ_HANDLED;
+}
+
+static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
+ struct mvumi_cmd *cmd)
+{
+ void *ib_entry;
+ struct mvumi_msg_frame *ib_frame;
+ unsigned int frame_len;
+
+ ib_frame = cmd->frame;
+ if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
+ dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
+ return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
+ }
+ if (tag_is_empty(&mhba->tag_pool)) {
+ dev_dbg(&mhba->pdev->dev, "no free tag.\n");
+ return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
+ }
+ if (mvumi_get_ib_list_entry(mhba, &ib_entry))
+ return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
+
+ cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
+ cmd->frame->request_id = mhba->io_seq++;
+ cmd->request_id = cmd->frame->request_id;
+ mhba->tag_cmd[cmd->frame->tag] = cmd;
+ frame_len = sizeof(*ib_frame) - 4 +
+ ib_frame->sg_counts * sizeof(struct mvumi_sgl);
+ memcpy(ib_entry, ib_frame, frame_len);
+ return MV_QUEUE_COMMAND_RESULT_SENT;
+}
+
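+/*
+ * Queue the new command (if any) on the waiting list, then drain the list
+ * into the inbound list and notify the firmware once for the whole batch.
+ */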
+static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
+{
+ unsigned short num_of_cl_sent = 0;
+ enum mvumi_qc_result result;
+
+ if (cmd)
+ list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
+
+ while (!list_empty(&mhba->waiting_req_list)) {
+ cmd = list_first_entry(&mhba->waiting_req_list,
+ struct mvumi_cmd, queue_pointer);
+ list_del_init(&cmd->queue_pointer);
+ result = mvumi_send_command(mhba, cmd);
+ switch (result) {
+ case MV_QUEUE_COMMAND_RESULT_SENT:
+ num_of_cl_sent++;
+ break;
+ case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE:
+ list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
+ if (num_of_cl_sent > 0)
+ mvumi_send_ib_list_entry(mhba);
+
+ return;
+ }
+ }
+ if (num_of_cl_sent > 0)
+ mvumi_send_ib_list_entry(mhba);
+}
+
+/**
+ * mvumi_enable_intr - Enables interrupts
+ * @regs: FW register set
+ */
+static void mvumi_enable_intr(void *regs)
+{
+ unsigned int mask;
+
+ iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG);
+ mask = ioread32(regs + CPU_ENPOINTA_MASK_REG);
+ mask |= INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR;
+ iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG);
+}
+
+/**
+ * mvumi_disable_intr - Disables interrupts
+ * @regs: FW register set
+ */
+static void mvumi_disable_intr(void *regs)
+{
+ unsigned int mask;
+
+ iowrite32(0, regs + CPU_ARM_TO_PCIEA_MASK_REG);
+ mask = ioread32(regs + CPU_ENPOINTA_MASK_REG);
+ mask &= ~(INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR);
+ iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG);
+}
+
+static int mvumi_clear_intr(void *extend)
+{
+ struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
+ unsigned int status, isr_status = 0, tmp = 0;
+ void *regs = mhba->mmio;
+
+ status = ioread32(regs + CPU_MAIN_INT_CAUSE_REG);
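+ /* not our interrupt (no MU bit), or the device is gone (all 1s) */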
+ if (!(status & INT_MAP_MU) || status == 0xFFFFFFFF)
+ return 1;
+ if (unlikely(status & INT_MAP_COMAERR)) {
+ tmp = ioread32(regs + CLA_ISR_CAUSE);
+ if (tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ))
+ iowrite32(tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ),
+ regs + CLA_ISR_CAUSE);
+ status ^= INT_MAP_COMAERR;
+ /* inbound or outbound parity error, command will timeout */
+ }
+ if (status & INT_MAP_COMAOUT) {
+ tmp = ioread32(regs + CLA_ISR_CAUSE);
+ if (tmp & CLIC_OUT_IRQ)
+ iowrite32(tmp & CLIC_OUT_IRQ, regs + CLA_ISR_CAUSE);
+ }
+ if (status & INT_MAP_DL_CPU2PCIEA) {
+ isr_status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG);
+ if (isr_status)
+ iowrite32(isr_status, regs + CPU_ARM_TO_PCIEA_DRBL_REG);
+ }
+
+ mhba->global_isr = status;
+ mhba->isr_status = isr_status;
+
+ return 0;
+}
+
+/**
+ * mvumi_read_fw_status_reg - returns the current FW status value
+ * @regs: FW register set
+ */
+static unsigned int mvumi_read_fw_status_reg(void *regs)
+{
+ unsigned int status;
+
+ status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG);
+ if (status)
+ iowrite32(status, regs + CPU_ARM_TO_PCIEA_DRBL_REG);
+ return status;
+}
+
+static struct mvumi_instance_template mvumi_instance_template = {
+ .fire_cmd = mvumi_fire_cmd,
+ .enable_intr = mvumi_enable_intr,
+ .disable_intr = mvumi_disable_intr,
+ .clear_intr = mvumi_clear_intr,
+ .read_fw_status_reg = mvumi_read_fw_status_reg,
+};
+
+static int mvumi_slave_configure(struct scsi_device *sdev)
+{
+ struct mvumi_hba *mhba;
+ unsigned char bitcount = sizeof(unsigned char) * 8;
+
+ mhba = (struct mvumi_hba *) sdev->host->hostdata;
+ if (sdev->id >= mhba->max_target_id)
+ return -EINVAL;
+
+ mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
+ return 0;
+}
+
+/**
+ * mvumi_build_frame - Prepares a direct cdb (DCDB) command
+ * @mhba: Adapter soft state
+ * @scmd: SCSI command
+ * @cmd: Command to be prepared
+ *
+ * This function prepares CDB commands. These are typically pass-through
+ * commands to the devices.
+ */
+static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
+ struct scsi_cmnd *scmd, struct mvumi_cmd *cmd)
+{
+ struct mvumi_msg_frame *pframe;
+
+ cmd->scmd = scmd;
+ cmd->cmd_status = REQ_STATUS_PENDING;
+ pframe = cmd->frame;
+ pframe->device_id = ((unsigned short) scmd->device->id) |
+ (((unsigned short) scmd->device->lun) << 8);
+ pframe->cmd_flag = 0;
+
+ switch (scmd->sc_data_direction) {
+ case DMA_NONE:
+ pframe->cmd_flag |= CMD_FLAG_NON_DATA;
+ break;
+ case DMA_FROM_DEVICE:
+ pframe->cmd_flag |= CMD_FLAG_DATA_IN;
+ break;
+ case DMA_TO_DEVICE:
+ pframe->cmd_flag |= CMD_FLAG_DATA_OUT;
+ break;
+ case DMA_BIDIRECTIONAL:
+ default:
+ dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
+ "cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]);
+ goto error;
+ }
+
+ pframe->cdb_length = scmd->cmd_len;
+ memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length);
+ pframe->req_function = CL_FUN_SCSI_CMD;
+ if (scsi_bufflen(scmd)) {
+ if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
+ &pframe->sg_counts))
+ goto error;
+
+ pframe->data_transfer_length = scsi_bufflen(scmd);
+ } else {
+ pframe->sg_counts = 0;
+ pframe->data_transfer_length = 0;
+ }
+ return 0;
+
+error:
+ scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
+ 0);
+ return -1;
+}
+
+/**
+ * mvumi_queue_command - Queue entry point
+ * @shost: SCSI host to which the command is queued
+ * @scmd: SCSI command to be queued
+ */
+static int mvumi_queue_command(struct Scsi_Host *shost,
+ struct scsi_cmnd *scmd)
+{
+ struct mvumi_cmd *cmd;
+ struct mvumi_hba *mhba;
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(shost->host_lock, irq_flags);
+ scsi_cmd_get_serial(shost, scmd);
+
+ mhba = (struct mvumi_hba *) shost->hostdata;
+ scmd->result = 0;
+ cmd = mvumi_get_cmd(mhba);
+ if (unlikely(!cmd)) {
+ spin_unlock_irqrestore(shost->host_lock, irq_flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
+ if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
+ goto out_return_cmd;
+
+ cmd->scmd = scmd;
+ scmd->SCp.ptr = (char *) cmd;
+ mhba->instancet->fire_cmd(mhba, cmd);
+ spin_unlock_irqrestore(shost->host_lock, irq_flags);
+ return 0;
+
+out_return_cmd:
+ mvumi_return_cmd(mhba, cmd);
+ scmd->scsi_done(scmd);
+ spin_unlock_irqrestore(shost->host_lock, irq_flags);
+ return 0;
+}
+
+static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
+{
+ struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
+ struct Scsi_Host *host = scmd->device->host;
+ struct mvumi_hba *mhba = shost_priv(host);
+ unsigned long flags;
+
+ spin_lock_irqsave(mhba->shost->host_lock, flags);
+
+ if (mhba->tag_cmd[cmd->frame->tag]) {
+ mhba->tag_cmd[cmd->frame->tag] = 0;
+ tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
+ }
+ if (!list_empty(&cmd->queue_pointer))
+ list_del_init(&cmd->queue_pointer);
+ else
+ atomic_dec(&mhba->fw_outstanding);
+
+ scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
+ scmd->SCp.ptr = NULL;
+ if (scsi_bufflen(scmd)) {
+ if (scsi_sg_count(scmd)) {
+ pci_unmap_sg(mhba->pdev,
+ scsi_sglist(scmd),
+ scsi_sg_count(scmd),
+ (int)scmd->sc_data_direction);
+ } else {
+ pci_unmap_single(mhba->pdev,
+ scmd->SCp.dma_handle,
+ scsi_bufflen(scmd),
+ (int)scmd->sc_data_direction);
+
+ scmd->SCp.dma_handle = 0;
+ }
+ }
+ mvumi_return_cmd(mhba, cmd);
+ spin_unlock_irqrestore(mhba->shost->host_lock, flags);
+
+ return BLK_EH_NOT_HANDLED;
+}
+
+static int
+mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int geom[])
+{
+ int heads, sectors;
+ sector_t cylinders;
+ unsigned long tmp;
+
+ heads = 64;
+ sectors = 32;
+ tmp = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, tmp);
+
+ if (capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ tmp = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, tmp);
+ }
+ geom[0] = heads;
+ geom[1] = sectors;
+ geom[2] = cylinders;
+
+ return 0;
+}
+
+static struct scsi_host_template mvumi_template = {
+
+ .module = THIS_MODULE,
+ .name = "Marvell Storage Controller",
+ .slave_configure = mvumi_slave_configure,
+ .queuecommand = mvumi_queue_command,
+ .eh_host_reset_handler = mvumi_host_reset,
+ .bios_param = mvumi_bios_param,
+ .this_id = -1,
+};
+
+static struct scsi_transport_template mvumi_transport_template = {
+ .eh_timed_out = mvumi_timed_out,
+};
+
+/**
+ * mvumi_init_fw - Initializes the FW
+ * @mhba: Adapter soft state
+ *
+ * This is the main function for initializing firmware.
+ */
+static int mvumi_init_fw(struct mvumi_hba *mhba)
+{
+ int ret = 0;
+
+ if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
+ dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
+ return -EBUSY;
+ }
+ ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
+ if (ret)
+ goto fail_ioremap;
+
+ mhba->mmio = mhba->base_addr[0];
+
+ switch (mhba->pdev->device) {
+ case PCI_DEVICE_ID_MARVELL_MV9143:
+ mhba->instancet = &mvumi_instance_template;
+ mhba->io_seq = 0;
+ mhba->max_sge = MVUMI_MAX_SG_ENTRY;
+ mhba->request_id_enabled = 1;
+ break;
+ default:
+ dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
+ mhba->pdev->device);
+ mhba->instancet = NULL;
+ ret = -EINVAL;
+ goto fail_alloc_mem;
+ }
+ dev_dbg(&mhba->pdev->dev, "device id 0x%04X found\n",
+ mhba->pdev->device);
+
+ mhba->handshake_page = kzalloc(HSP_MAX_SIZE, GFP_KERNEL);
+ if (!mhba->handshake_page) {
+ dev_err(&mhba->pdev->dev,
+ "failed to allocate memory for handshake\n");
+ ret = -ENOMEM;
+ goto fail_alloc_mem;
+ }
+ mhba->handshake_page_phys = virt_to_phys(mhba->handshake_page);
+
+ if (mvumi_start(mhba)) {
+ ret = -EINVAL;
+ goto fail_ready_state;
+ }
+ ret = mvumi_alloc_cmds(mhba);
+ if (ret)
+ goto fail_ready_state;
+
+ return 0;
+
+fail_ready_state:
+ mvumi_release_mem_resource(mhba);
+ kfree(mhba->handshake_page);
+fail_alloc_mem:
+ mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
+fail_ioremap:
+ pci_release_regions(mhba->pdev);
+
+ return ret;
+}
+
+/**
+ * mvumi_io_attach - Attaches this driver to SCSI mid-layer
+ * @mhba: Adapter soft state
+ */
+static int mvumi_io_attach(struct mvumi_hba *mhba)
+{
+ struct Scsi_Host *host = mhba->shost;
+ int ret;
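+ /*
+ * SG entries that fit in one inbound frame: the "+ 4" gives back the
+ * payload[1] placeholder already counted in sizeof(struct mvumi_msg_frame).
+ */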
+ unsigned int max_sg = (mhba->ib_max_size + 4 -
+ sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
+
+ host->irq = mhba->pdev->irq;
+ host->unique_id = mhba->unique_id;
+ host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
+ host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
+ host->max_sectors = mhba->max_transfer_size / 512;
+ host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
+ host->max_id = mhba->max_target_id;
+ host->max_cmd_len = MAX_COMMAND_SIZE;
+ host->transportt = &mvumi_transport_template;
+
+ ret = scsi_add_host(host, &mhba->pdev->dev);
+ if (ret) {
+ dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
+ return ret;
+ }
+ mhba->fw_flag |= MVUMI_FW_ATTACH;
+ scsi_scan_host(host);
+
+ return 0;
+}
+
+/**
+ * mvumi_probe_one - PCI hotplug entry point
+ * @pdev: PCI device structure
+ * @id: PCI ids of supported hotplugged adapter
+ */
+static int __devinit mvumi_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct Scsi_Host *host;
+ struct mvumi_hba *mhba;
+ int ret;
+
+ dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
+ pdev->vendor, pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ if (IS_DMA64) {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail_set_dma_mask;
+ }
+ } else {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail_set_dma_mask;
+ }
+
+ host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
+ if (!host) {
+ dev_err(&pdev->dev, "scsi_host_alloc failed\n");
+ ret = -ENOMEM;
+ goto fail_alloc_instance;
+ }
+ mhba = shost_priv(host);
+
+ INIT_LIST_HEAD(&mhba->cmd_pool);
+ INIT_LIST_HEAD(&mhba->ob_data_list);
+ INIT_LIST_HEAD(&mhba->free_ob_list);
+ INIT_LIST_HEAD(&mhba->res_list);
+ INIT_LIST_HEAD(&mhba->waiting_req_list);
+ atomic_set(&mhba->fw_outstanding, 0);
+ init_waitqueue_head(&mhba->int_cmd_wait_q);
+
+ mhba->pdev = pdev;
+ mhba->shost = host;
+ mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;
+
+ ret = mvumi_init_fw(mhba);
+ if (ret)
+ goto fail_init_fw;
+
+ ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
+ "mvumi", mhba);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register IRQ\n");
+ goto fail_init_irq;
+ }
+ mhba->instancet->enable_intr(mhba->mmio);
+ pci_set_drvdata(pdev, mhba);
+
+ ret = mvumi_io_attach(mhba);
+ if (ret)
+ goto fail_io_attach;
+ dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");
+
+ return 0;
+
+fail_io_attach:
+ pci_set_drvdata(pdev, NULL);
+ mhba->instancet->disable_intr(mhba->mmio);
+ free_irq(mhba->pdev->irq, mhba);
+fail_init_irq:
+ mvumi_release_fw(mhba);
+fail_init_fw:
+ scsi_host_put(host);
+
+fail_alloc_instance:
+fail_set_dma_mask:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+static void mvumi_detach_one(struct pci_dev *pdev)
+{
+ struct Scsi_Host *host;
+ struct mvumi_hba *mhba;
+
+ mhba = pci_get_drvdata(pdev);
+ host = mhba->shost;
+ scsi_remove_host(mhba->shost);
+ mvumi_flush_cache(mhba);
+
+ mhba->instancet->disable_intr(mhba->mmio);
+ free_irq(mhba->pdev->irq, mhba);
+ mvumi_release_fw(mhba);
+ scsi_host_put(host);
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+ dev_dbg(&pdev->dev, "driver is removed!\n");
+}
+
+/**
+ * mvumi_shutdown - Shutdown entry point
+ * @pdev: PCI device structure
+ */
+static void mvumi_shutdown(struct pci_dev *pdev)
+{
+ struct mvumi_hba *mhba = pci_get_drvdata(pdev);
+
+ mvumi_flush_cache(mhba);
+}
+
+static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct mvumi_hba *mhba = NULL;
+
+ mhba = pci_get_drvdata(pdev);
+ mvumi_flush_cache(mhba);
+
+ pci_set_drvdata(pdev, mhba);
+ mhba->instancet->disable_intr(mhba->mmio);
+ free_irq(mhba->pdev->irq, mhba);
+ mvumi_unmap_pci_addr(pdev, mhba->base_addr);
+ pci_release_regions(pdev);
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int mvumi_resume(struct pci_dev *pdev)
+{
+ int ret;
+ struct mvumi_hba *mhba = NULL;
+
+ mhba = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "enable device failed\n");
+ return ret;
+ }
+ pci_set_master(pdev);
+ if (IS_DMA64) {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail;
+ }
+ } else {
+ ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (ret)
+ goto fail;
+ }
+ ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
+ if (ret)
+ goto fail;
+ ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
+ if (ret)
+ goto release_regions;
+
+ mhba->mmio = mhba->base_addr[0];
+ mvumi_reset(mhba->mmio);
+
+ if (mvumi_start(mhba)) {
+ ret = -EINVAL;
+ goto unmap_pci_addr;
+ }
+
+ ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
+ "mvumi", mhba);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register IRQ\n");
+ goto unmap_pci_addr;
+ }
+ mhba->instancet->enable_intr(mhba->mmio);
+
+ return 0;
+
+unmap_pci_addr:
+ mvumi_unmap_pci_addr(pdev, mhba->base_addr);
+release_regions:
+ pci_release_regions(pdev);
+fail:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+static struct pci_driver mvumi_pci_driver = {
+ .name = MV_DRIVER_NAME,
+ .id_table = mvumi_pci_table,
+ .probe = mvumi_probe_one,
+ .remove = __devexit_p(mvumi_detach_one),
+ .shutdown = mvumi_shutdown,
+#ifdef CONFIG_PM
+ .suspend = mvumi_suspend,
+ .resume = mvumi_resume,
+#endif
+};
+
+/**
+ * mvumi_init - Driver load entry point
+ */
+static int __init mvumi_init(void)
+{
+ return pci_register_driver(&mvumi_pci_driver);
+}
+
+/**
+ * mvumi_exit - Driver unload entry point
+ */
+static void __exit mvumi_exit(void)
+{
+ pci_unregister_driver(&mvumi_pci_driver);
+}
+
+module_init(mvumi_init);
+module_exit(mvumi_exit);
diff --git a/drivers/scsi/mvumi.h b/drivers/scsi/mvumi.h
new file mode 100644
index 0000000..10b9237
--- /dev/null
+++ b/drivers/scsi/mvumi.h
@@ -0,0 +1,505 @@
+/*
+ * Marvell UMI header file
+ *
+ * Copyright 2011 Marvell. <jyli@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+ */
+
+#ifndef MVUMI_H
+#define MVUMI_H
+
+#define MAX_BASE_ADDRESS 6
+
+#define VER_MAJOR 1
+#define VER_MINOR 1
+#define VER_OEM 0
+#define VER_BUILD 1500
+
+#define MV_DRIVER_NAME "mvumi"
+#define PCI_VENDOR_ID_MARVELL_2 0x1b4b
+#define PCI_DEVICE_ID_MARVELL_MV9143 0x9143
+
+#define MVUMI_INTERNAL_CMD_WAIT_TIME 45
+
+#define IS_DMA64 (sizeof(dma_addr_t) == 8)
+
+enum mvumi_qc_result {
+ MV_QUEUE_COMMAND_RESULT_SENT = 0,
+ MV_QUEUE_COMMAND_RESULT_NO_RESOURCE,
+};
+
+enum {
+ /*******************************************/
+
+ /* ARM Mbus Registers Map */
+
+ /*******************************************/
+ CPU_MAIN_INT_CAUSE_REG = 0x20200,
+ CPU_MAIN_IRQ_MASK_REG = 0x20204,
+ CPU_MAIN_FIQ_MASK_REG = 0x20208,
+ CPU_ENPOINTA_MASK_REG = 0x2020C,
+ CPU_ENPOINTB_MASK_REG = 0x20210,
+
+ INT_MAP_COMAERR = 1 << 6,
+ INT_MAP_COMAIN = 1 << 7,
+ INT_MAP_COMAOUT = 1 << 8,
+ INT_MAP_COMBERR = 1 << 9,
+ INT_MAP_COMBIN = 1 << 10,
+ INT_MAP_COMBOUT = 1 << 11,
+
+ INT_MAP_COMAINT = (INT_MAP_COMAOUT | INT_MAP_COMAERR),
+ INT_MAP_COMBINT = (INT_MAP_COMBOUT | INT_MAP_COMBIN | INT_MAP_COMBERR),
+
+ INT_MAP_DL_PCIEA2CPU = 1 << 0,
+ INT_MAP_DL_CPU2PCIEA = 1 << 1,
+
+ /***************************************/
+
+ /* ARM Doorbell Registers Map */
+
+ /***************************************/
+ CPU_PCIEA_TO_ARM_DRBL_REG = 0x20400,
+ CPU_PCIEA_TO_ARM_MASK_REG = 0x20404,
+ CPU_ARM_TO_PCIEA_DRBL_REG = 0x20408,
+ CPU_ARM_TO_PCIEA_MASK_REG = 0x2040C,
+
+ DRBL_HANDSHAKE = 1 << 0,
+ DRBL_SOFT_RESET = 1 << 1,
+ DRBL_BUS_CHANGE = 1 << 2,
+ DRBL_EVENT_NOTIFY = 1 << 3,
+ DRBL_MU_RESET = 1 << 4,
+ DRBL_HANDSHAKE_ISR = DRBL_HANDSHAKE,
+
+ CPU_PCIEA_TO_ARM_MSG0 = 0x20430,
+ CPU_PCIEA_TO_ARM_MSG1 = 0x20434,
+ CPU_ARM_TO_PCIEA_MSG0 = 0x20438,
+ CPU_ARM_TO_PCIEA_MSG1 = 0x2043C,
+
+ /*******************************************/
+
+ /* ARM Communication List Registers Map */
+
+ /*******************************************/
+ CLA_INB_LIST_BASEL = 0x500,
+ CLA_INB_LIST_BASEH = 0x504,
+ CLA_INB_AVAL_COUNT_BASEL = 0x508,
+ CLA_INB_AVAL_COUNT_BASEH = 0x50C,
+ CLA_INB_DESTI_LIST_BASEL = 0x510,
+ CLA_INB_DESTI_LIST_BASEH = 0x514,
+ CLA_INB_WRITE_POINTER = 0x518,
+ CLA_INB_READ_POINTER = 0x51C,
+
+ CLA_OUTB_LIST_BASEL = 0x530,
+ CLA_OUTB_LIST_BASEH = 0x534,
+ CLA_OUTB_SOURCE_LIST_BASEL = 0x538,
+ CLA_OUTB_SOURCE_LIST_BASEH = 0x53C,
+ CLA_OUTB_COPY_POINTER = 0x544,
+ CLA_OUTB_READ_POINTER = 0x548,
+
+ CLA_ISR_CAUSE = 0x560,
+ CLA_ISR_MASK = 0x564,
+
+ INT_MAP_MU = (INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAINT),
+
+ CL_POINTER_TOGGLE = 1 << 12,
+
+ CLIC_IN_IRQ = 1 << 0,
+ CLIC_OUT_IRQ = 1 << 1,
+ CLIC_IN_ERR_IRQ = 1 << 8,
+ CLIC_OUT_ERR_IRQ = 1 << 12,
+
+ CL_SLOT_NUM_MASK = 0xFFF,
+
+ /*
+ * Command flag is the flag for the CDB command itself
+ */
+ /* 1-non data; 0-data command */
+ CMD_FLAG_NON_DATA = 1 << 0,
+ CMD_FLAG_DMA = 1 << 1,
+ CMD_FLAG_PIO = 1 << 2,
+ /* 1-host read data */
+ CMD_FLAG_DATA_IN = 1 << 3,
+ /* 1-host write data */
+ CMD_FLAG_DATA_OUT = 1 << 4,
+
+ SCSI_CMD_MARVELL_SPECIFIC = 0xE1,
+ CDB_CORE_SHUTDOWN = 0xB,
+};
+
+#define APICDB0_EVENT 0xF4
+#define APICDB1_EVENT_GETEVENT 0
+#define MAX_EVENTS_RETURNED 6
+
+struct mvumi_driver_event {
+ u32 time_stamp;
+ u32 sequence_no;
+ u32 event_id;
+ u8 severity;
+ u8 param_count;
+ u16 device_id;
+ u32 params[4];
+ u8 sense_data_length;
+ u8 Reserved1;
+ u8 sense_data[30];
+};
+
+struct mvumi_event_req {
+ unsigned char count;
+ unsigned char reserved[3];
+ struct mvumi_driver_event events[MAX_EVENTS_RETURNED];
+};
+
+struct mvumi_events_wq {
+ struct work_struct work_q;
+ struct mvumi_hba *mhba;
+ unsigned int event;
+ void *param;
+};
+
+#define MVUMI_MAX_SG_ENTRY 32
+#define SGD_EOT (1L << 27)
+
+struct mvumi_sgl {
+ u32 baseaddr_l;
+ u32 baseaddr_h;
+ u32 flags;
+ u32 size;
+};
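+
+/*
+ * baseaddr_l/baseaddr_h hold the low and high halves of a segment's 64-bit
+ * DMA address; SGD_EOT in flags presumably marks the final entry of a
+ * frame's SG list.
+ */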
+
+struct mvumi_res {
+ struct list_head entry;
+ dma_addr_t bus_addr;
+ void *virt_addr;
+ unsigned int size;
+ unsigned short type; /* enum Resource_Type */
+};
+
+/* Resource type */
+enum resource_type {
+ RESOURCE_CACHED_MEMORY = 0,
+ RESOURCE_UNCACHED_MEMORY
+};
+
+struct mvumi_sense_data {
+ u8 error_code:7;
+ u8 valid:1;
+ u8 segment_number;
+ u8 sense_key:4;
+ u8 reserved:1;
+ u8 incorrect_length:1;
+ u8 end_of_media:1;
+ u8 file_mark:1;
+ u8 information[4];
+ u8 additional_sense_length;
+ u8 command_specific_information[4];
+ u8 additional_sense_code;
+ u8 additional_sense_code_qualifier;
+ u8 field_replaceable_unit_code;
+ u8 sense_key_specific[3];
+};
+
+/* Request initiator must set the status to REQ_STATUS_PENDING. */
+#define REQ_STATUS_PENDING 0x80
+
+struct mvumi_cmd {
+ struct list_head queue_pointer;
+ struct mvumi_msg_frame *frame;
+ struct scsi_cmnd *scmd;
+ atomic_t sync_cmd;
+ void *data_buf;
+ unsigned short request_id;
+ unsigned char cmd_status;
+};
+
+/*
+ * the function type of the inbound frame
+ */
+#define CL_FUN_SCSI_CMD 0x1
+
+struct mvumi_msg_frame {
+ u16 device_id;
+ u16 tag;
+ u8 cmd_flag;
+ u8 req_function;
+ u8 cdb_length;
+ u8 sg_counts;
+ u32 data_transfer_length;
+ u16 request_id;
+ u16 reserved1;
+ u8 cdb[MAX_COMMAND_SIZE];
+ u32 payload[1];
+};
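+
+/*
+ * payload[] is a placeholder for the variable-size tail of the frame;
+ * judging by the max_sg calculation in mvumi_io_attach(), this is where
+ * the SG list goes.
+ */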
+
+/*
+ * the response flag for data_payload of the outbound frame
+ */
+#define CL_RSP_FLAG_NODATA 0x0
+#define CL_RSP_FLAG_SENSEDATA 0x1
+
+struct mvumi_rsp_frame {
+ u16 device_id;
+ u16 tag;
+ u8 req_status;
+ u8 rsp_flag; /* Indicates the type of Data_Payload.*/
+ u16 request_id;
+ u32 payload[1];
+};
+
+struct mvumi_ob_data {
+ struct list_head list;
+ unsigned char data[0];
+};
+
+struct version_info {
+ u32 ver_major;
+ u32 ver_minor;
+ u32 ver_oem;
+ u32 ver_build;
+};
+
+#define FW_MAX_DELAY 30
+#define MVUMI_FW_BUSY (1U << 0)
+#define MVUMI_FW_ATTACH (1U << 1)
+#define MVUMI_FW_ALLOC (1U << 2)
+
+/*
+ * State is the state of the MU
+ */
+#define FW_STATE_IDLE 0
+#define FW_STATE_STARTING 1
+#define FW_STATE_HANDSHAKING 2
+#define FW_STATE_STARTED 3
+#define FW_STATE_ABORT 4
+
+#define HANDSHAKE_SIGNATURE 0x5A5A5A5AL
+#define HANDSHAKE_READYSTATE 0x55AA5AA5L
+#define HANDSHAKE_DONESTATE 0x55AAA55AL
+
+/* HandShake Status definition */
+#define HS_STATUS_OK 1
+#define HS_STATUS_ERR 2
+#define HS_STATUS_INVALID 3
+
+/* HandShake State/Cmd definition */
+#define HS_S_START 1
+#define HS_S_RESET 2
+#define HS_S_PAGE_ADDR 3
+#define HS_S_QUERY_PAGE 4
+#define HS_S_SEND_PAGE 5
+#define HS_S_END 6
+#define HS_S_ABORT 7
+#define HS_PAGE_VERIFY_SIZE 128
+
+#define HS_GET_STATE(a) (a & 0xFFFF)
+#define HS_GET_STATUS(a) ((a & 0xFFFF0000) >> 16)
+#define HS_SET_STATE(a, b) (a |= (b & 0xFFFF))
+#define HS_SET_STATUS(a, b) (a |= ((b & 0xFFFF) << 16))
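+
+/*
+ * For example, a message word of 0x00010003 decodes with the macros above
+ * to state HS_S_PAGE_ADDR (3) and status HS_STATUS_OK (1).
+ */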
+
+/* handshake frame */
+struct mvumi_hs_frame {
+ u16 size;
+ /* host information */
+ u8 host_type;
+ u8 reserved_1[1];
+ struct version_info host_ver; /* bios or driver version */
+
+ /* controller information */
+ u32 system_io_bus;
+ u32 slot_number;
+ u32 intr_level;
+ u32 intr_vector;
+
+ /* communication list configuration */
+ u32 ib_baseaddr_l;
+ u32 ib_baseaddr_h;
+ u32 ob_baseaddr_l;
+ u32 ob_baseaddr_h;
+
+ u8 ib_entry_size;
+ u8 ob_entry_size;
+ u8 ob_depth;
+ u8 ib_depth;
+
+ /* system time */
+ u64 seconds_since1970;
+};
+
+struct mvumi_hs_header {
+ u8 page_code;
+ u8 checksum;
+ u16 frame_length;
+ u32 frame_content[1];
+};
+
+/*
+ * the page code type of the handshake header
+ */
+#define HS_PAGE_FIRM_CAP 0x1
+#define HS_PAGE_HOST_INFO 0x2
+#define HS_PAGE_FIRM_CTL 0x3
+#define HS_PAGE_CL_INFO 0x4
+#define HS_PAGE_TOTAL 0x5
+
+#define HSP_SIZE(i) sizeof(struct mvumi_hs_page##i)
+
+#define HSP_MAX_SIZE ({ \
+ int size, m1, m2; \
+ m1 = max(HSP_SIZE(1), HSP_SIZE(3)); \
+ m2 = max(HSP_SIZE(2), HSP_SIZE(4)); \
+ size = max(m1, m2); \
+ size; \
+})
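+
+/*
+ * HSP_MAX_SIZE evaluates to the largest of the four handshake page
+ * structures below; mvumi_init_fw() uses it to size the handshake_page
+ * buffer so any page type fits.
+ */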
+
+/* The format of the page code for Firmware capability */
+struct mvumi_hs_page1 {
+ u8 pagecode;
+ u8 checksum;
+ u16 frame_length;
+
+ u16 number_of_ports;
+ u16 max_devices_support;
+ u16 max_io_support;
+ u16 umi_ver;
+ u32 max_transfer_size;
+ struct version_info fw_ver;
+ u8 cl_in_max_entry_size;
+ u8 cl_out_max_entry_size;
+ u8 cl_inout_list_depth;
+ u8 total_pages;
+ u16 capability;
+ u16 reserved1;
+};
+
+/* The format of the page code for Host information */
+struct mvumi_hs_page2 {
+ u8 pagecode;
+ u8 checksum;
+ u16 frame_length;
+
+ u8 host_type;
+ u8 reserved[3];
+ struct version_info host_ver;
+ u32 system_io_bus;
+ u32 slot_number;
+ u32 intr_level;
+ u32 intr_vector;
+ u64 seconds_since1970;
+};
+
+/* The format of the page code for firmware control */
+struct mvumi_hs_page3 {
+ u8 pagecode;
+ u8 checksum;
+ u16 frame_length;
+ u16 control;
+ u8 reserved[2];
+ u32 host_bufferaddr_l;
+ u32 host_bufferaddr_h;
+ u32 host_eventaddr_l;
+ u32 host_eventaddr_h;
+};
+
+struct mvumi_hs_page4 {
+ u8 pagecode;
+ u8 checksum;
+ u16 frame_length;
+ u32 ib_baseaddr_l;
+ u32 ib_baseaddr_h;
+ u32 ob_baseaddr_l;
+ u32 ob_baseaddr_h;
+ u8 ib_entry_size;
+ u8 ob_entry_size;
+ u8 ob_depth;
+ u8 ib_depth;
+};
+
+struct mvumi_tag {
+ unsigned short *stack;
+ unsigned short top;
+ unsigned short size;
+};
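+
+/*
+ * The tag pool looks like a simple LIFO stack of free tag values:
+ * tag_release_one() (used in mvumi_timed_out() above) presumably pushes a
+ * tag back onto stack[] while a matching allocator in mvumi.c pops one off,
+ * with top tracking the number of free entries.
+ */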
+
+struct mvumi_hba {
+ void *base_addr[MAX_BASE_ADDRESS];
+ void *mmio;
+ struct list_head cmd_pool;
+ struct Scsi_Host *shost;
+ wait_queue_head_t int_cmd_wait_q;
+ struct pci_dev *pdev;
+ unsigned int unique_id;
+ atomic_t fw_outstanding;
+ struct mvumi_instance_template *instancet;
+
+ void *ib_list;
+ dma_addr_t ib_list_phys;
+
+ void *ob_list;
+ dma_addr_t ob_list_phys;
+
+ void *ib_shadow;
+ dma_addr_t ib_shadow_phys;
+
+ void *ob_shadow;
+ dma_addr_t ob_shadow_phys;
+
+ void *handshake_page;
+ dma_addr_t handshake_page_phys;
+
+ unsigned int global_isr;
+ unsigned int isr_status;
+
+ unsigned short max_sge;
+ unsigned short max_target_id;
+ unsigned char *target_map;
+ unsigned int max_io;
+ unsigned int list_num_io;
+ unsigned int ib_max_size;
+ unsigned int ob_max_size;
+ unsigned int ib_max_size_setting;
+ unsigned int ob_max_size_setting;
+ unsigned int max_transfer_size;
+ unsigned char hba_total_pages;
+ unsigned char fw_flag;
+ unsigned char request_id_enabled;
+ unsigned short hba_capability;
+ unsigned short io_seq;
+
+ unsigned int ib_cur_slot;
+ unsigned int ob_cur_slot;
+ unsigned int fw_state;
+
+ struct list_head ob_data_list;
+ struct list_head free_ob_list;
+ struct list_head res_list;
+ struct list_head waiting_req_list;
+
+ struct mvumi_tag tag_pool;
+ struct mvumi_cmd **tag_cmd;
+};
+
+struct mvumi_instance_template {
+ void (*fire_cmd)(struct mvumi_hba *, struct mvumi_cmd *);
+ void (*enable_intr)(void *) ;
+ void (*disable_intr)(void *);
+ int (*clear_intr)(void *);
+ unsigned int (*read_fw_status_reg)(void *);
+};
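+
+/*
+ * Per-controller ops table: mvumi_init_fw() selects an instance template
+ * for the detected device, probe/resume use enable_intr/disable_intr, and
+ * queuecommand posts work through fire_cmd.
+ */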
+
+extern struct timezone sys_tz;
+#endif
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.c b/drivers/scsi/qla4xxx/ql4_bsg.c
new file mode 100644
index 0000000..8acdc58
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_bsg.c
@@ -0,0 +1,513 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2011 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+
+#include "ql4_def.h"
+#include "ql4_glbl.h"
+#include "ql4_bsg.h"
+
+static int
+qla4xxx_read_flash(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ uint32_t offset = 0;
+ uint32_t length = 0;
+ dma_addr_t flash_dma;
+ uint8_t *flash = NULL;
+ int rval = -EINVAL;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto leave;
+
+ if (ql4xxx_reset_active(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ if (ha->flash_state != QLFLASH_WAITING) {
+ ql4_printk(KERN_ERR, ha, "%s: another flash operation "
+ "active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ ha->flash_state = QLFLASH_READING;
+ offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ length = bsg_job->reply_payload.payload_len;
+
+ flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
+ GFP_KERNEL);
+ if (!flash) {
+ ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
+ "data\n", __func__);
+ rval = -ENOMEM;
+ goto leave;
+ }
+
+ rval = qla4xxx_get_flash(ha, flash_dma, offset, length);
+ if (rval) {
+ ql4_printk(KERN_ERR, ha, "%s: get flash failed\n", __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ rval = -EIO;
+ } else {
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ flash, length);
+ bsg_reply->result = DID_OK << 16;
+ }
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
+leave:
+ ha->flash_state = QLFLASH_WAITING;
+ return rval;
+}
+
+static int
+qla4xxx_update_flash(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ uint32_t length = 0;
+ uint32_t offset = 0;
+ uint32_t options = 0;
+ dma_addr_t flash_dma;
+ uint8_t *flash = NULL;
+ int rval = -EINVAL;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto leave;
+
+ if (ql4xxx_reset_active(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ if (ha->flash_state != QLFLASH_WAITING) {
+ ql4_printk(KERN_ERR, ha, "%s: another flash operation "
+ "active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ ha->flash_state = QLFLASH_WRITING;
+ length = bsg_job->request_payload.payload_len;
+ offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ options = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
+
+ flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma,
+ GFP_KERNEL);
+ if (!flash) {
+ ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
+ "data\n", __func__);
+ rval = -ENOMEM;
+ goto leave;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, flash, length);
+
+ rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options);
+ if (rval) {
+ ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ rval = -EIO;
+ } else
+ bsg_reply->result = DID_OK << 16;
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma);
+leave:
+ ha->flash_state = QLFLASH_WAITING;
+ return rval;
+}
+
+static int
+qla4xxx_get_acb_state(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ uint32_t status[MBOX_REG_COUNT];
+ uint32_t acb_idx;
+ uint32_t ip_idx;
+ int rval = -EINVAL;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto leave;
+
+ /* Only 4022 and above adapters are supported */
+ if (is_qla4010(ha))
+ goto leave;
+
+ if (ql4xxx_reset_active(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ if (bsg_job->reply_payload.payload_len < sizeof(status)) {
+ ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n",
+ __func__, bsg_job->reply_payload.payload_len);
+ rval = -EINVAL;
+ goto leave;
+ }
+
+ acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
+
+ rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status);
+ if (rval) {
+ ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n",
+ __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ rval = -EIO;
+ } else {
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ status, sizeof(status));
+ bsg_reply->result = DID_OK << 16;
+ }
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+leave:
+ return rval;
+}
+
+static int
+qla4xxx_read_nvram(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ uint32_t offset = 0;
+ uint32_t len = 0;
+ uint32_t total_len = 0;
+ dma_addr_t nvram_dma;
+ uint8_t *nvram = NULL;
+ int rval = -EINVAL;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto leave;
+
+ /* Only 40xx adapters are supported */
+ if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
+ goto leave;
+
+ if (ql4xxx_reset_active(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ len = bsg_job->reply_payload.payload_len;
+ total_len = offset + len;
+
+ /* total len should not be greater than max NVRAM size */
+ if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
+ ((is_qla4022(ha) || is_qla4032(ha)) &&
+ total_len > QL40X2_NVRAM_SIZE)) {
+ ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
+ " nvram size, offset=%d len=%d\n",
+ __func__, offset, len);
+ goto leave;
+ }
+
+ nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
+ GFP_KERNEL);
+ if (!nvram) {
+ ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram "
+ "data\n", __func__);
+ rval = -ENOMEM;
+ goto leave;
+ }
+
+ rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len);
+ if (rval) {
+ ql4_printk(KERN_ERR, ha, "%s: get nvram failed\n", __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ rval = -EIO;
+ } else {
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ nvram, len);
+ bsg_reply->result = DID_OK << 16;
+ }
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
+leave:
+ return rval;
+}
+
+static int
+qla4xxx_update_nvram(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ uint32_t offset = 0;
+ uint32_t len = 0;
+ uint32_t total_len = 0;
+ dma_addr_t nvram_dma;
+ uint8_t *nvram = NULL;
+ int rval = -EINVAL;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto leave;
+
+ if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha)))
+ goto leave;
+
+ if (ql4xxx_reset_active(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ len = bsg_job->request_payload.payload_len;
+ total_len = offset + len;
+
+ /* total len should not be greater than max NVRAM size */
+ if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) ||
+ ((is_qla4022(ha) || is_qla4032(ha)) &&
+ total_len > QL40X2_NVRAM_SIZE)) {
+ ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max"
+ " nvram size, offset=%d len=%d\n",
+ __func__, offset, len);
+ goto leave;
+ }
+
+ nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma,
+ GFP_KERNEL);
+ if (!nvram) {
+ ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash "
+ "data\n", __func__);
+ rval = -ENOMEM;
+ goto leave;
+ }
+
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, nvram, len);
+
+ rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len);
+ if (rval) {
+ ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ rval = -EIO;
+ } else
+ bsg_reply->result = DID_OK << 16;
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma);
+leave:
+ return rval;
+}
+
+static int
+qla4xxx_restore_defaults(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ uint32_t region = 0;
+ uint32_t field0 = 0;
+ uint32_t field1 = 0;
+ int rval = -EINVAL;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto leave;
+
+ if (is_qla4010(ha))
+ goto leave;
+
+ if (ql4xxx_reset_active(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ region = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ field0 = bsg_req->rqst_data.h_vendor.vendor_cmd[2];
+ field1 = bsg_req->rqst_data.h_vendor.vendor_cmd[3];
+
+ rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1);
+ if (rval) {
+ ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ rval = -EIO;
+ } else
+ bsg_reply->result = DID_OK << 16;
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+leave:
+ return rval;
+}
+
+static int
+qla4xxx_bsg_get_acb(struct bsg_job *bsg_job)
+{
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ uint32_t acb_type = 0;
+ uint32_t len = 0;
+ dma_addr_t acb_dma;
+ uint8_t *acb = NULL;
+ int rval = -EINVAL;
+
+ bsg_reply->reply_payload_rcv_len = 0;
+
+ if (unlikely(pci_channel_offline(ha->pdev)))
+ goto leave;
+
+ /* Only 4022 and above adapters are supported */
+ if (is_qla4010(ha))
+ goto leave;
+
+ if (ql4xxx_reset_active(ha)) {
+ ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+ rval = -EBUSY;
+ goto leave;
+ }
+
+ acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+ len = bsg_job->reply_payload.payload_len;
+ if (len < sizeof(struct addr_ctrl_blk)) {
+ ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n",
+ __func__, len);
+ rval = -EINVAL;
+ goto leave;
+ }
+
+ acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL);
+ if (!acb) {
+ ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb "
+ "data\n", __func__);
+ rval = -ENOMEM;
+ goto leave;
+ }
+
+ rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len);
+ if (rval) {
+ ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__);
+ bsg_reply->result = DID_ERROR << 16;
+ rval = -EIO;
+ } else {
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ acb, len);
+ bsg_reply->result = DID_OK << 16;
+ }
+
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma);
+leave:
+ return rval;
+}
+
+/**
+ * qla4xxx_process_vendor_specific - handle vendor specific bsg request
+ * @bsg_job: bsg_job to handle
+ */
+int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
+{
+ struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+
+ switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
+ case QLISCSI_VND_READ_FLASH:
+ return qla4xxx_read_flash(bsg_job);
+
+ case QLISCSI_VND_UPDATE_FLASH:
+ return qla4xxx_update_flash(bsg_job);
+
+ case QLISCSI_VND_GET_ACB_STATE:
+ return qla4xxx_get_acb_state(bsg_job);
+
+ case QLISCSI_VND_READ_NVRAM:
+ return qla4xxx_read_nvram(bsg_job);
+
+ case QLISCSI_VND_UPDATE_NVRAM:
+ return qla4xxx_update_nvram(bsg_job);
+
+ case QLISCSI_VND_RESTORE_DEFAULTS:
+ return qla4xxx_restore_defaults(bsg_job);
+
+ case QLISCSI_VND_GET_ACB:
+ return qla4xxx_bsg_get_acb(bsg_job);
+
+ default:
+ ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: "
+ "0x%x\n", __func__, bsg_req->msgcode);
+ bsg_reply->result = (DID_ERROR << 16);
+ bsg_reply->reply_payload_rcv_len = 0;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ return -ENOSYS;
+ }
+}
+
+/**
+ * qla4xxx_bsg_request - handle bsg request from ISCSI transport
+ * @bsg_job: bsg_job to handle
+ */
+int qla4xxx_bsg_request(struct bsg_job *bsg_job)
+{
+ struct iscsi_bsg_request *bsg_req = bsg_job->request;
+ struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+ struct scsi_qla_host *ha = to_qla_host(host);
+
+ switch (bsg_req->msgcode) {
+ case ISCSI_BSG_HST_VENDOR:
+ return qla4xxx_process_vendor_specific(bsg_job);
+
+ default:
+ ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n",
+ __func__, bsg_req->msgcode);
+ }
+
+ return -ENOSYS;
+}
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.h b/drivers/scsi/qla4xxx/ql4_bsg.h
new file mode 100644
index 0000000..c6a03645
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_bsg.h
@@ -0,0 +1,19 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2011 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+#ifndef __QL4_BSG_H
+#define __QL4_BSG_H
+
+/* BSG Vendor specific commands */
+#define QLISCSI_VND_READ_FLASH 1
+#define QLISCSI_VND_UPDATE_FLASH 2
+#define QLISCSI_VND_GET_ACB_STATE 3
+#define QLISCSI_VND_READ_NVRAM 4
+#define QLISCSI_VND_UPDATE_NVRAM 5
+#define QLISCSI_VND_RESTORE_DEFAULTS 6
+#define QLISCSI_VND_GET_ACB 7
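+
+/*
+ * The handlers in ql4_bsg.c expect vendor_cmd[0] to carry one of the
+ * opcodes above; depending on the opcode, vendor_cmd[1..3] carry the
+ * flash/nvram offset, ACB index/type or restore region and fields, with
+ * the request or reply payload holding the data itself.
+ */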
+
+#endif