Diffstat (limited to 'drivers/scsi/qla2xxx/qla_iocb.c')
-rw-r--r-- | drivers/scsi/qla2xxx/qla_iocb.c | 242
1 file changed, 93 insertions(+), 149 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index c5b3c61..c71863f 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -155,6 +155,8 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
 	uint32_t	*cur_dsd;
 	scsi_qla_host_t	*ha;
 	struct scsi_cmnd *cmd;
+	struct scatterlist *sg;
+	int i;
 
 	cmd = sp->cmd;
 
@@ -163,7 +165,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
 	    __constant_cpu_to_le32(COMMAND_TYPE);
 
 	/* No data transfer */
-	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
 		return;
 	}
@@ -177,35 +179,23 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
 	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
 
 	/* Load data segments */
-	if (cmd->use_sg != 0) {
-		struct scatterlist *cur_seg;
-		struct scatterlist *end_seg;
-
-		cur_seg = (struct scatterlist *)cmd->request_buffer;
-		end_seg = cur_seg + tot_dsds;
-		while (cur_seg < end_seg) {
-			cont_entry_t *cont_pkt;
-
-			/* Allocate additional continuation packets? */
-			if (avail_dsds == 0) {
-				/*
-				 * Seven DSDs are available in the Continuation
-				 * Type 0 IOCB.
-				 */
-				cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
-				cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
-				avail_dsds = 7;
-			}
-
-			*cur_dsd++ = cpu_to_le32(sg_dma_address(cur_seg));
-			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
-			avail_dsds--;
-
-			cur_seg++;
+	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+		cont_entry_t *cont_pkt;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Seven DSDs are available in the Continuation
+			 * Type 0 IOCB.
+			 */
+			cont_pkt = qla2x00_prep_cont_type0_iocb(ha);
+			cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
+			avail_dsds = 7;
 		}
-	} else {
-		*cur_dsd++ = cpu_to_le32(sp->dma_handle);
-		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
+
+		*cur_dsd++ = cpu_to_le32(sg_dma_address(sg));
+		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
 	}
 }
 
@@ -224,6 +214,8 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 	uint32_t	*cur_dsd;
 	scsi_qla_host_t	*ha;
 	struct scsi_cmnd *cmd;
+	struct scatterlist *sg;
+	int i;
 
 	cmd = sp->cmd;
 
@@ -232,7 +224,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 	    __constant_cpu_to_le32(COMMAND_A64_TYPE);
 
 	/* No data transfer */
-	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
 		return;
 	}
@@ -246,39 +238,26 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
 
 	/* Load data segments */
-	if (cmd->use_sg != 0) {
-		struct scatterlist *cur_seg;
-		struct scatterlist *end_seg;
-
-		cur_seg = (struct scatterlist *)cmd->request_buffer;
-		end_seg = cur_seg + tot_dsds;
-		while (cur_seg < end_seg) {
-			dma_addr_t	sle_dma;
-			cont_a64_entry_t *cont_pkt;
-
-			/* Allocate additional continuation packets? */
-			if (avail_dsds == 0) {
-				/*
-				 * Five DSDs are available in the Continuation
-				 * Type 1 IOCB.
-				 */
-				cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
-				cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
-				avail_dsds = 5;
-			}
-
-			sle_dma = sg_dma_address(cur_seg);
-			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
-			avail_dsds--;
-
-			cur_seg++;
+	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+		dma_addr_t	sle_dma;
+		cont_a64_entry_t *cont_pkt;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Five DSDs are available in the Continuation
+			 * Type 1 IOCB.
+			 */
+			cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
+			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
+			avail_dsds = 5;
 		}
-	} else {
-		*cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
-		*cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
-		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
+
+		sle_dma = sg_dma_address(sg);
+		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
 	}
 }
 
@@ -291,7 +270,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
 int
 qla2x00_start_scsi(srb_t *sp)
 {
-	int		ret;
+	int		ret, nseg;
 	unsigned long   flags;
 	scsi_qla_host_t	*ha;
 	struct scsi_cmnd *cmd;
@@ -299,7 +278,6 @@ qla2x00_start_scsi(srb_t *sp)
 	uint32_t	index;
 	uint32_t	handle;
 	cmd_entry_t	*cmd_pkt;
-	struct scatterlist *sg;
 	uint16_t	cnt;
 	uint16_t	req_cnt;
 	uint16_t	tot_dsds;
@@ -337,23 +315,15 @@ qla2x00_start_scsi(srb_t *sp)
 		goto queuing_error;
 
 	/* Map the sg table so we have an accurate count of sg entries needed */
-	if (cmd->use_sg) {
-		sg = (struct scatterlist *) cmd->request_buffer;
-		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
-		    cmd->sc_data_direction);
-		if (tot_dsds == 0)
-			goto queuing_error;
-	} else if (cmd->request_bufflen) {
-		dma_addr_t	req_dma;
-
-		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
-		    cmd->request_bufflen, cmd->sc_data_direction);
-		if (dma_mapping_error(req_dma))
+	if (scsi_sg_count(cmd)) {
+		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+		    scsi_sg_count(cmd), cmd->sc_data_direction);
+		if (unlikely(!nseg))
 			goto queuing_error;
+	} else
+		nseg = 0;
 
-		sp->dma_handle = req_dma;
-		tot_dsds = 1;
-	}
+	tot_dsds = nseg;
 
 	/* Calculate the number of request entries needed. */
 	req_cnt = ha->isp_ops.calc_req_entries(tot_dsds);
@@ -391,7 +361,7 @@ qla2x00_start_scsi(srb_t *sp)
 	/* Load SCSI command packet. */
 	memcpy(cmd_pkt->scsi_cdb, cmd->cmnd, cmd->cmd_len);
 
-	cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);
+	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
 
 	/* Build IOCB segments */
 	ha->isp_ops.build_iocbs(sp, cmd_pkt, tot_dsds);
@@ -423,14 +393,9 @@ qla2x00_start_scsi(srb_t *sp)
 	return (QLA_SUCCESS);
 
 queuing_error:
-	if (cmd->use_sg && tot_dsds) {
-		sg = (struct scatterlist *) cmd->request_buffer;
-		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
-		    cmd->sc_data_direction);
-	} else if (tot_dsds) {
-		pci_unmap_single(ha->pdev, sp->dma_handle,
-		    cmd->request_bufflen, cmd->sc_data_direction);
-	}
+	if (tot_dsds)
+		scsi_dma_unmap(cmd);
+
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return (QLA_FUNCTION_FAILED);
@@ -453,9 +418,10 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
 {
 	mrk_entry_t *mrk;
 	struct mrk_entry_24xx *mrk24;
+	scsi_qla_host_t *pha = to_qla_parent(ha);
 
 	mrk24 = NULL;
-	mrk = (mrk_entry_t *)qla2x00_req_pkt(ha);
+	mrk = (mrk_entry_t *)qla2x00_req_pkt(pha);
 	if (mrk == NULL) {
 		DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
 		    __func__, ha->host_no));
@@ -472,6 +438,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
 			mrk24->lun[1] = LSB(lun);
 			mrk24->lun[2] = MSB(lun);
 			host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
+			mrk24->vp_index = ha->vp_idx;
 		} else {
 			SET_TARGET_ID(ha, mrk->target, loop_id);
 			mrk->lun = cpu_to_le16(lun);
@@ -479,7 +446,7 @@ __qla2x00_marker(scsi_qla_host_t *ha, uint16_t loop_id, uint16_t lun,
 	}
 	wmb();
 
-	qla2x00_isp_cmd(ha);
+	qla2x00_isp_cmd(pha);
 
 	return (QLA_SUCCESS);
 }
@@ -642,6 +609,8 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 	uint32_t	*cur_dsd;
 	scsi_qla_host_t	*ha;
 	struct scsi_cmnd *cmd;
+	struct scatterlist *sg;
+	int i;
 
 	cmd = sp->cmd;
 
@@ -650,7 +619,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 	    __constant_cpu_to_le32(COMMAND_TYPE_7);
 
 	/* No data transfer */
-	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
+	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
 		cmd_pkt->byte_count = __constant_cpu_to_le32(0);
 		return;
 	}
@@ -670,39 +639,27 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 	cur_dsd = (uint32_t *)&cmd_pkt->dseg_0_address;
 
 	/* Load data segments */
-	if (cmd->use_sg != 0) {
-		struct scatterlist *cur_seg;
-		struct scatterlist *end_seg;
-
-		cur_seg = (struct scatterlist *)cmd->request_buffer;
-		end_seg = cur_seg + tot_dsds;
-		while (cur_seg < end_seg) {
-			dma_addr_t	sle_dma;
-			cont_a64_entry_t *cont_pkt;
-
-			/* Allocate additional continuation packets? */
-			if (avail_dsds == 0) {
-				/*
-				 * Five DSDs are available in the Continuation
-				 * Type 1 IOCB.
-				 */
-				cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
-				cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
-				avail_dsds = 5;
-			}
-
-			sle_dma = sg_dma_address(cur_seg);
-			*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
-			*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
-			*cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
-			avail_dsds--;
-
-			cur_seg++;
+
+	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
+		dma_addr_t	sle_dma;
+		cont_a64_entry_t *cont_pkt;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Five DSDs are available in the Continuation
+			 * Type 1 IOCB.
+			 */
+			cont_pkt = qla2x00_prep_cont_type1_iocb(ha);
+			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
+			avail_dsds = 5;
 		}
-	} else {
-		*cur_dsd++ = cpu_to_le32(LSD(sp->dma_handle));
-		*cur_dsd++ = cpu_to_le32(MSD(sp->dma_handle));
-		*cur_dsd++ = cpu_to_le32(cmd->request_bufflen);
+
+		sle_dma = sg_dma_address(sg);
+		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
 	}
 }
 
@@ -716,7 +673,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
 int
 qla24xx_start_scsi(srb_t *sp)
 {
-	int		ret;
+	int		ret, nseg;
 	unsigned long   flags;
 	scsi_qla_host_t	*ha;
 	struct scsi_cmnd *cmd;
@@ -724,7 +681,6 @@ qla24xx_start_scsi(srb_t *sp)
 	uint32_t	index;
 	uint32_t	handle;
 	struct cmd_type_7 *cmd_pkt;
-	struct scatterlist *sg;
 	uint16_t	cnt;
 	uint16_t	req_cnt;
 	uint16_t	tot_dsds;
@@ -762,23 +718,15 @@ qla24xx_start_scsi(srb_t *sp)
 		goto queuing_error;
 
 	/* Map the sg table so we have an accurate count of sg entries needed */
-	if (cmd->use_sg) {
-		sg = (struct scatterlist *) cmd->request_buffer;
-		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
-		    cmd->sc_data_direction);
-		if (tot_dsds == 0)
-			goto queuing_error;
-	} else if (cmd->request_bufflen) {
-		dma_addr_t	req_dma;
-
-		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
-		    cmd->request_bufflen, cmd->sc_data_direction);
-		if (dma_mapping_error(req_dma))
+	if (scsi_sg_count(cmd)) {
+		nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+		    scsi_sg_count(cmd), cmd->sc_data_direction);
+		if (unlikely(!nseg))
 			goto queuing_error;
+	} else
+		nseg = 0;
 
-		sp->dma_handle = req_dma;
-		tot_dsds = 1;
-	}
+	tot_dsds = nseg;
 
 	req_cnt = qla24xx_calc_iocbs(tot_dsds);
 	if (ha->req_q_cnt < (req_cnt + 2)) {
@@ -813,6 +761,7 @@ qla24xx_start_scsi(srb_t *sp)
 	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
 	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
 	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+	cmd_pkt->vp_index = sp->fcport->vp_idx;
 
 	int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
 	host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
@@ -821,7 +770,7 @@ qla24xx_start_scsi(srb_t *sp)
 	memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
 	host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
 
-	cmd_pkt->byte_count = cpu_to_le32((uint32_t)cmd->request_bufflen);
+	cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
 
 	/* Build IOCB segments */
 	qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
@@ -853,14 +802,9 @@ qla24xx_start_scsi(srb_t *sp)
 	return QLA_SUCCESS;
 
 queuing_error:
-	if (cmd->use_sg && tot_dsds) {
-		sg = (struct scatterlist *) cmd->request_buffer;
-		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
-		    cmd->sc_data_direction);
-	} else if (tot_dsds) {
-		pci_unmap_single(ha->pdev, sp->dma_handle,
-		    cmd->request_bufflen, cmd->sc_data_direction);
-	}
+	if (tot_dsds)
+		scsi_dma_unmap(cmd);
+
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	return QLA_FUNCTION_FAILED;
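For reference, the buffer-handling pattern this patch converts to can be summarized independently of the qla2xxx IOCB layout. The sketch below is illustrative only and is not part of the patch: example_map_and_load() and struct example_dsd are hypothetical stand-ins for the driver's DSD entries, while scsi_bufflen(), scsi_sg_count(), scsi_sglist(), scsi_for_each_sg(), dma_map_sg(), sg_dma_address(), sg_dma_len() and scsi_dma_unmap() are the real mid-layer and DMA helpers the patch adopts in place of cmd->use_sg, cmd->request_buffer and the pci_map_*/pci_unmap_* calls.

/*
 * Illustrative sketch only (not part of this patch): example_map_and_load()
 * and struct example_dsd are hypothetical; the scsi_*, dma_* and sg_*
 * helpers are the real kernel APIs the patch switches to.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <scsi/scsi_cmnd.h>

struct example_dsd {
	__le32 addr_lo;
	__le32 addr_hi;
	__le32 len;
};

static int example_map_and_load(struct device *dev, struct scsi_cmnd *cmd,
    struct example_dsd *dsd, int max_dsds)
{
	struct scatterlist *sg;
	int i, nseg;

	/* Commands without a data phase carry a zero byte count. */
	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE)
		return 0;

	/*
	 * Map the sg table; the mapped count can be smaller than
	 * scsi_sg_count() if an IOMMU coalesced entries, which is why the
	 * patch feeds the returned count (tot_dsds) to the IOCB builders.
	 */
	nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
	    cmd->sc_data_direction);
	if (unlikely(!nseg))
		return -ENOMEM;

	if (nseg > max_dsds) {
		scsi_dma_unmap(cmd);	/* single call undoes the mapping */
		return -EINVAL;
	}

	/* Walk only the mapped entries, as the patch does with tot_dsds. */
	scsi_for_each_sg(cmd, sg, nseg, i) {
		dma_addr_t sle_dma = sg_dma_address(sg);

		dsd[i].addr_lo = cpu_to_le32(lower_32_bits(sle_dma));
		dsd[i].addr_hi = cpu_to_le32(upper_32_bits(sle_dma));
		dsd[i].len = cpu_to_le32(sg_dma_len(sg));
	}

	return nseg;
}

The error path mirrors the patch's queuing_error handling: once dma_map_sg() has succeeded, a single scsi_dma_unmap() call is sufficient cleanup, replacing the separate pci_unmap_sg()/pci_unmap_single() branches of the old code.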