author     Lennert Buytenhek <buytenh@wantstofly.org>  2007-10-19 04:11:28 +0200
committer  Dale Farnsworth <dale@farnsworth.org>       2007-10-23 08:23:07 -0700
commit     e4d00fa9bfed733051652a32686b9911e8549ac8
tree       459436b87003e24b8fb663bf2507bf4f423fb1c1  /drivers/net/mv643xx_eth.c
parent     f9fbbc18dfcdc6156306f475de8b0bb96f97cd0d
mv643xx_eth: Remove MV643XX_ETH_ register prefix
Now that all register address and bit defines are in a private
namespace (drivers/net/mv643xx_eth.h), we can safely remove the
MV643XX_ETH_ prefix to conserve horizontal space.
Signed-off-by: Lennert Buytenhek <buytenh@marvell.com>
Acked-by: Tzachi Perelstein <tzachi@marvell.com>
Signed-off-by: Dale Farnsworth <dale@farnsworth.org>
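The change is purely mechanical: each register define simply loses its MV643XX_ETH_ prefix, which is safe because the defines now live in the driver-private header rather than a shared one. As a minimal sketch of what one such rename looks like (the offset below is illustrative only, not the actual value from drivers/net/mv643xx_eth.h):

    /* Before: driver-private define still carrying the global-style prefix.
     * 0x0400 + (port << 10) is a placeholder offset used here for illustration. */
    #define MV643XX_ETH_PORT_CONFIG_REG(port)   (0x0400 + ((port) << 10))

    /* After: same value, shorter name; mv_read()/mv_write() call sites in
     * mv643xx_eth.c now fit more easily within 80 columns. */
    #define PORT_CONFIG_REG(port)               (0x0400 + ((port) << 10))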
Diffstat (limited to 'drivers/net/mv643xx_eth.c')
-rw-r--r--  drivers/net/mv643xx_eth.c | 227
1 file changed, 107 insertions(+), 120 deletions(-)
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 98b30e5..ca120e5 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -80,7 +80,7 @@ static char mv643xx_driver_version[] = "1.0";
 
 static void __iomem *mv643xx_eth_base;
 
-/* used to protect MV643XX_ETH_SMI_REG, which is shared across ports */
+/* used to protect SMI_REG, which is shared across ports */
 static DEFINE_SPINLOCK(mv643xx_eth_phy_lock);
 
 static inline u32 mv_read(int offset)
@@ -214,12 +214,12 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
 	struct mv643xx_private *mp = netdev_priv(dev);
 	u32 config_reg;
 
-	config_reg = mv_read(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num));
+	config_reg = mv_read(PORT_CONFIG_REG(mp->port_num));
 	if (dev->flags & IFF_PROMISC)
-		config_reg |= (u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
+		config_reg |= (u32) UNICAST_PROMISCUOUS_MODE;
 	else
-		config_reg &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
-	mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), config_reg);
+		config_reg &= ~(u32) UNICAST_PROMISCUOUS_MODE;
+	mv_write(PORT_CONFIG_REG(mp->port_num), config_reg);
 
 	eth_port_set_multicast_list(dev);
 }
@@ -455,41 +455,37 @@ static void mv643xx_eth_update_pscr(struct net_device *dev,
 	u32 o_pscr, n_pscr;
 	unsigned int queues;
 
-	o_pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
+	o_pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
 	n_pscr = o_pscr;
 
 	/* clear speed, duplex and rx buffer size fields */
-	n_pscr &= ~(MV643XX_ETH_SET_MII_SPEED_TO_100 |
-		   MV643XX_ETH_SET_GMII_SPEED_TO_1000 |
-		   MV643XX_ETH_SET_FULL_DUPLEX_MODE |
-		   MV643XX_ETH_MAX_RX_PACKET_MASK);
+	n_pscr &= ~(SET_MII_SPEED_TO_100 |
+		   SET_GMII_SPEED_TO_1000 |
+		   SET_FULL_DUPLEX_MODE |
+		   MAX_RX_PACKET_MASK);
 
 	if (ecmd->duplex == DUPLEX_FULL)
-		n_pscr |= MV643XX_ETH_SET_FULL_DUPLEX_MODE;
+		n_pscr |= SET_FULL_DUPLEX_MODE;
 
 	if (ecmd->speed == SPEED_1000)
-		n_pscr |= MV643XX_ETH_SET_GMII_SPEED_TO_1000 |
-			  MV643XX_ETH_MAX_RX_PACKET_9700BYTE;
+		n_pscr |= SET_GMII_SPEED_TO_1000 |
+			  MAX_RX_PACKET_9700BYTE;
 	else {
 		if (ecmd->speed == SPEED_100)
-			n_pscr |= MV643XX_ETH_SET_MII_SPEED_TO_100;
-		n_pscr |= MV643XX_ETH_MAX_RX_PACKET_1522BYTE;
+			n_pscr |= SET_MII_SPEED_TO_100;
+		n_pscr |= MAX_RX_PACKET_1522BYTE;
 	}
 
 	if (n_pscr != o_pscr) {
-		if ((o_pscr & MV643XX_ETH_SERIAL_PORT_ENABLE) == 0)
-			mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
-				 n_pscr);
+		if ((o_pscr & SERIAL_PORT_ENABLE) == 0)
+			mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
 		else {
 			queues = mv643xx_eth_port_disable_tx(port_num);
-			o_pscr &= ~MV643XX_ETH_SERIAL_PORT_ENABLE;
-			mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
-				 o_pscr);
-			mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
-				 n_pscr);
-			mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num),
-				 n_pscr);
+			o_pscr &= ~SERIAL_PORT_ENABLE;
+			mv_write(PORT_SERIAL_CONTROL_REG(port_num), o_pscr);
+			mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
+			mv_write(PORT_SERIAL_CONTROL_REG(port_num), n_pscr);
 			if (queues)
 				mv643xx_eth_port_enable_tx(port_num, queues);
 		}
@@ -515,13 +511,13 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 	unsigned int port_num = mp->port_num;
 
 	/* Read interrupt cause registers */
-	eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) &
+	eth_int_cause = mv_read(INTERRUPT_CAUSE_REG(port_num)) &
 						ETH_INT_UNMASK_ALL;
 	if (eth_int_cause & ETH_INT_CAUSE_EXT) {
 		eth_int_cause_ext = mv_read(
-			MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
+			INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
 				ETH_INT_UNMASK_ALL_EXT;
-		mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num),
+		mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num),
 							~eth_int_cause_ext);
 	}
@@ -549,10 +545,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id)
 #ifdef MV643XX_NAPI
 	if (eth_int_cause & ETH_INT_CAUSE_RX) {
 		/* schedule the NAPI poll routine to maintain port */
-		mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
-							ETH_INT_MASK_ALL);
+		mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
+
 		/* wait for previous write to complete */
-		mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
+		mv_read(INTERRUPT_MASK_REG(port_num));
 
 		netif_rx_schedule(dev, &mp->napi);
 	}
@@ -604,9 +600,9 @@ static unsigned int eth_port_set_rx_coal(unsigned int eth_port_num,
 	unsigned int coal = ((t_clk / 1000000) * delay) / 64;
 
 	/* Set RX Coalescing mechanism */
-	mv_write(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num),
+	mv_write(SDMA_CONFIG_REG(eth_port_num),
 				((coal & 0x3fff) << 8) |
-				(mv_read(MV643XX_ETH_SDMA_CONFIG_REG(eth_port_num))
+				(mv_read(SDMA_CONFIG_REG(eth_port_num))
 					& 0xffc000ff));
 
 	return coal;
@@ -642,8 +638,7 @@ static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
 	unsigned int coal;
 	coal = ((t_clk / 1000000) * delay) / 64;
 
 	/* Set TX Coalescing mechanism */
-	mv_write(MV643XX_ETH_TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num),
-								coal << 4);
+	mv_write(TX_FIFO_URGENT_THRESHOLD_REG(eth_port_num), coal << 4);
 
 	return coal;
 }
@@ -779,10 +774,10 @@ static int mv643xx_eth_open(struct net_device *dev)
 	int err;
 
 	/* Clear any pending ethernet port interrupts */
-	mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
-	mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
+	mv_write(INTERRUPT_CAUSE_REG(port_num), 0);
+	mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
 	/* wait for previous write to complete */
-	mv_read (MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num));
+	mv_read (INTERRUPT_CAUSE_EXTEND_REG(port_num));
 
 	err = request_irq(dev->irq, mv643xx_eth_int_handler,
 			IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
@@ -889,11 +884,10 @@ static int mv643xx_eth_open(struct net_device *dev)
 	eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL);
 
 	/* Unmask phy and link status changes interrupts */
-	mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
-						ETH_INT_UNMASK_ALL_EXT);
+	mv_write(INTERRUPT_EXTEND_MASK_REG(port_num), ETH_INT_UNMASK_ALL_EXT);
 
 	/* Unmask RX buffer and TX end interrupt */
-	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
+	mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
 
 	return 0;
@@ -973,9 +967,9 @@ static int mv643xx_eth_stop(struct net_device *dev)
 	unsigned int port_num = mp->port_num;
 
 	/* Mask all interrupts on ethernet port */
-	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
+	mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
 	/* wait for previous write to complete */
-	mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
+	mv_read(INTERRUPT_MASK_REG(port_num));
 
 #ifdef MV643XX_NAPI
 	napi_disable(&mp->napi);
@@ -1014,16 +1008,15 @@ static int mv643xx_poll(struct napi_struct *napi, int budget)
 #endif
 
 	work_done = 0;
-	if ((mv_read(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
+	if ((mv_read(RX_CURRENT_QUEUE_DESC_PTR_0(port_num)))
 						!= (u32) mp->rx_used_desc_q)
 		work_done = mv643xx_eth_receive_queue(dev, budget);
 
 	if (work_done < budget) {
 		netif_rx_complete(dev, napi);
-		mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
-		mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
-		mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
-						ETH_INT_UNMASK_ALL);
+		mv_write(INTERRUPT_CAUSE_REG(port_num), 0);
+		mv_write(INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
+		mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
 	}
 
 	return work_done;
@@ -1226,13 +1219,13 @@ static void mv643xx_netpoll(struct net_device *netdev)
 	struct mv643xx_private *mp = netdev_priv(netdev);
 	int port_num = mp->port_num;
 
-	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
+	mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_MASK_ALL);
 	/* wait for previous write to complete */
-	mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
+	mv_read(INTERRUPT_MASK_REG(port_num));
 
 	mv643xx_eth_int_handler(netdev->irq, netdev);
 
-	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
+	mv_write(INTERRUPT_MASK_REG(port_num), ETH_INT_UNMASK_ALL);
 }
 #endif
@@ -1350,8 +1343,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
 
 	/* set default config values */
 	eth_port_uc_addr_get(port_num, dev->dev_addr);
-	mp->rx_ring_size = MV643XX_ETH_PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
-	mp->tx_ring_size = MV643XX_ETH_PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;
+	mp->rx_ring_size = PORT_DEFAULT_RECEIVE_QUEUE_SIZE;
+	mp->tx_ring_size = PORT_DEFAULT_TRANSMIT_QUEUE_SIZE;
 
 	if (is_valid_ether_addr(pd->mac_addr))
 		memcpy(dev->dev_addr, pd->mac_addr, 6);
@@ -1486,8 +1479,8 @@ static void mv643xx_eth_shutdown(struct platform_device *pdev)
 	unsigned int port_num = mp->port_num;
 
 	/* Mask all interrupts on ethernet port */
-	mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);
-	mv_read (MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
+	mv_write(INTERRUPT_MASK_REG(port_num), 0);
+	mv_read (INTERRUPT_MASK_REG(port_num));
 
 	eth_port_reset(port_num);
 }
@@ -1754,49 +1747,49 @@ static void eth_port_start(struct net_device *dev)
 
 	/* Assignment of Tx CTRP of given queue */
 	tx_curr_desc = mp->tx_curr_desc_q;
-	mv_write(MV643XX_ETH_TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
+	mv_write(TX_CURRENT_QUEUE_DESC_PTR_0(port_num),
 		(u32)((struct eth_tx_desc *)mp->tx_desc_dma + tx_curr_desc));
 
 	/* Assignment of Rx CRDP of given queue */
 	rx_curr_desc = mp->rx_curr_desc_q;
-	mv_write(MV643XX_ETH_RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
+	mv_write(RX_CURRENT_QUEUE_DESC_PTR_0(port_num),
 		(u32)((struct eth_rx_desc *)mp->rx_desc_dma + rx_curr_desc));
 
 	/* Add the assigned Ethernet address to the port's address table */
 	eth_port_uc_addr_set(port_num, dev->dev_addr);
 
 	/* Assign port configuration and command. */
-	mv_write(MV643XX_ETH_PORT_CONFIG_REG(port_num),
-			  MV643XX_ETH_PORT_CONFIG_DEFAULT_VALUE);
+	mv_write(PORT_CONFIG_REG(port_num),
+			  PORT_CONFIG_DEFAULT_VALUE);
 
-	mv_write(MV643XX_ETH_PORT_CONFIG_EXTEND_REG(port_num),
-			  MV643XX_ETH_PORT_CONFIG_EXTEND_DEFAULT_VALUE);
+	mv_write(PORT_CONFIG_EXTEND_REG(port_num),
+			  PORT_CONFIG_EXTEND_DEFAULT_VALUE);
 
-	pscr = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
+	pscr = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
 
-	pscr &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE | MV643XX_ETH_FORCE_LINK_PASS);
-	mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);
+	pscr &= ~(SERIAL_PORT_ENABLE | FORCE_LINK_PASS);
+	mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr);
 
-	pscr |= MV643XX_ETH_DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
-		MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII |
-		MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLX |
-		MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL |
-		MV643XX_ETH_SERIAL_PORT_CONTROL_RESERVED;
+	pscr |= DISABLE_AUTO_NEG_FOR_FLOW_CTRL |
+		DISABLE_AUTO_NEG_SPEED_GMII |
+		DISABLE_AUTO_NEG_FOR_DUPLX |
+		DO_NOT_FORCE_LINK_FAIL |
+		SERIAL_PORT_CONTROL_RESERVED;
 
-	mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);
+	mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr);
 
-	pscr |= MV643XX_ETH_SERIAL_PORT_ENABLE;
-	mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), pscr);
+	pscr |= SERIAL_PORT_ENABLE;
+	mv_write(PORT_SERIAL_CONTROL_REG(port_num), pscr);
 
 	/* Assign port SDMA configuration */
-	mv_write(MV643XX_ETH_SDMA_CONFIG_REG(port_num),
-			  MV643XX_ETH_PORT_SDMA_CONFIG_DEFAULT_VALUE);
+	mv_write(SDMA_CONFIG_REG(port_num),
+			  PORT_SDMA_CONFIG_DEFAULT_VALUE);
 
 	/* Enable port Rx. */
 	mv643xx_eth_port_enable_rx(port_num, ETH_RX_QUEUES_ENABLED);
 
 	/* Disable port bandwidth limits by clearing MTU register */
-	mv_write(MV643XX_ETH_MAXIMUM_TRANSMIT_UNIT(port_num), 0);
+	mv_write(MAXIMUM_TRANSMIT_UNIT(port_num), 0);
 
 	/* save phy settings across reset */
 	mv643xx_get_settings(dev, &ethtool_cmd);
@@ -1817,11 +1810,11 @@ static void eth_port_uc_addr_set(unsigned int port_num, unsigned char *p_addr)
 	mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) | (p_addr[3] << 0);
 
-	mv_write(MV643XX_ETH_MAC_ADDR_LOW(port_num), mac_l);
-	mv_write(MV643XX_ETH_MAC_ADDR_HIGH(port_num), mac_h);
+	mv_write(MAC_ADDR_LOW(port_num), mac_l);
+	mv_write(MAC_ADDR_HIGH(port_num), mac_h);
 
 	/* Accept frames with this address */
-	table = MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE(port_num);
+	table = DA_FILTER_UNICAST_TABLE_BASE(port_num);
 	eth_port_set_filter_table_entry(table, p_addr[5] & 0x0f);
 }
@@ -1833,8 +1826,8 @@ static void eth_port_uc_addr_get(unsigned int port_num, unsigned char *p_addr)
 	unsigned int mac_h;
 	unsigned int mac_l;
 
-	mac_h = mv_read(MV643XX_ETH_MAC_ADDR_HIGH(port_num));
-	mac_l = mv_read(MV643XX_ETH_MAC_ADDR_LOW(port_num));
+	mac_h = mv_read(MAC_ADDR_HIGH(port_num));
+	mac_l = mv_read(MAC_ADDR_LOW(port_num));
 
 	p_addr[0] = (mac_h >> 24) & 0xff;
 	p_addr[1] = (mac_h >> 16) & 0xff;
@@ -1894,7 +1887,7 @@ static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
 	if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) &&
 	    (p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
-		table = MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
+		table = DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
 							(eth_port_num);
 		eth_port_set_filter_table_entry(table, p_addr[5]);
 		return;
@@ -1968,7 +1961,7 @@ static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
 	for (i = 0; i < 8; i++)
 		crc_result = crc_result | (crc[i] << i);
 
-	table = MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num);
+	table = DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num);
 	eth_port_set_filter_table_entry(table, crc_result);
 }
@@ -1998,7 +1991,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
 			 * 3-1 Queue	 ETH_Q0=0
 			 * 7-4 Reserved = 0;
 			 */
-			mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
+			mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
 
 			/* Set all entries in DA filter other multicast
 			 * table (Ex_dFOMT)
@@ -2008,7 +2001,7 @@ static void eth_port_set_multicast_list(struct net_device *dev)
 			 * 3-1 Queue	 ETH_Q0=0
 			 * 7-4 Reserved = 0;
 			 */
-			mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
+			mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
 		}
 		return;
 	}
@@ -2018,11 +2011,11 @@ static void eth_port_set_multicast_list(struct net_device *dev)
 	 */
 	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
 		/* Clear DA filter special multicast table (Ex_dFSMT) */
-		mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
+		mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
 					(eth_port_num) + table_index, 0);
 
 		/* Clear DA filter other multicast table (Ex_dFOMT) */
-		mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
+		mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE
 					(eth_port_num) + table_index, 0);
 	}
@@ -2056,15 +2049,15 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num)
 	/* Clear DA filter unicast table (Ex_dFUT) */
 	for (table_index = 0; table_index <= 0xC; table_index += 4)
-		mv_write(MV643XX_ETH_DA_FILTER_UNICAST_TABLE_BASE
+		mv_write(DA_FILTER_UNICAST_TABLE_BASE
 					(eth_port_num) + table_index, 0);
 
 	for (table_index = 0; table_index <= 0xFC; table_index += 4) {
 		/* Clear DA filter special multicast table (Ex_dFSMT) */
-		mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
+		mv_write(DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
 					(eth_port_num) + table_index, 0);
 		/* Clear DA filter other multicast table (Ex_dFOMT) */
-		mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
+		mv_write(DA_FILTER_OTHER_MULTICAST_TABLE_BASE
 					(eth_port_num) + table_index, 0);
 	}
 }
@@ -2093,12 +2086,12 @@ static void eth_clear_mib_counters(unsigned int eth_port_num)
 	/* Perform dummy reads from MIB counters */
 	for (i = ETH_MIB_GOOD_OCTETS_RECEIVED_LOW; i < ETH_MIB_LATE_COLLISION;
 									i += 4)
-		mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(eth_port_num) + i);
+		mv_read(MIB_COUNTERS_BASE(eth_port_num) + i);
 }
 
 static inline u32 read_mib(struct mv643xx_private *mp, int offset)
 {
-	return mv_read(MV643XX_ETH_MIB_COUNTERS_BASE(mp->port_num) + offset);
+	return mv_read(MIB_COUNTERS_BASE(mp->port_num) + offset);
 }
 
 static void eth_update_mib_counters(struct mv643xx_private *mp)
@@ -2183,7 +2176,7 @@ static int ethernet_phy_get(unsigned int eth_port_num)
 {
 	unsigned int reg_data;
 
-	reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG);
+	reg_data = mv_read(PHY_ADDR_REG);
 
 	return ((reg_data >> (5 * eth_port_num)) & 0x1f);
 }
@@ -2210,10 +2203,10 @@ static void ethernet_phy_set(unsigned int eth_port_num, int phy_addr)
 	u32 reg_data;
 	int addr_shift = 5 * eth_port_num;
 
-	reg_data = mv_read(MV643XX_ETH_PHY_ADDR_REG);
+	reg_data = mv_read(PHY_ADDR_REG);
 	reg_data &= ~(0x1f << addr_shift);
 	reg_data |= (phy_addr & 0x1f) << addr_shift;
-	mv_write(MV643XX_ETH_PHY_ADDR_REG, reg_data);
+	mv_write(PHY_ADDR_REG, reg_data);
 }
 
 /*
@@ -2251,13 +2244,13 @@ static void ethernet_phy_reset(unsigned int eth_port_num)
 static void mv643xx_eth_port_enable_tx(unsigned int port_num,
 					unsigned int queues)
 {
-	mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), queues);
+	mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), queues);
 }
 
 static void mv643xx_eth_port_enable_rx(unsigned int port_num,
 					unsigned int queues)
 {
-	mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), queues);
+	mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), queues);
 }
 
 static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num)
@@ -2265,21 +2258,18 @@ static unsigned int mv643xx_eth_port_disable_tx(unsigned int port_num)
 	u32 queues;
 
 	/* Stop Tx port activity. Check port Tx activity. */
-	queues = mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num))
-									& 0xFF;
+	queues = mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF;
 	if (queues) {
 		/* Issue stop command for active queues only */
-		mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num),
-							(queues << 8));
+		mv_write(TRANSMIT_QUEUE_COMMAND_REG(port_num), (queues << 8));
 
 		/* Wait for all Tx activity to terminate. */
 		/* Check port cause register that all Tx queues are stopped */
-		while (mv_read(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num))
-									& 0xFF)
+		while (mv_read(TRANSMIT_QUEUE_COMMAND_REG(port_num)) & 0xFF)
			udelay(PHY_WAIT_MICRO_SECONDS);
 
 		/* Wait for Tx FIFO to empty */
-		while (mv_read(MV643XX_ETH_PORT_STATUS_REG(port_num)) &
+		while (mv_read(PORT_STATUS_REG(port_num)) &
 							ETH_PORT_TX_FIFO_EMPTY)
 			udelay(PHY_WAIT_MICRO_SECONDS);
 	}
@@ -2292,17 +2282,14 @@ static unsigned int mv643xx_eth_port_disable_rx(unsigned int port_num)
 	u32 queues;
 
 	/* Stop Rx port activity. Check port Rx activity. */
-	queues = mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num))
-									& 0xFF;
+	queues = mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF;
 	if (queues) {
 		/* Issue stop command for active queues only */
-		mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num),
-							(queues << 8));
+		mv_write(RECEIVE_QUEUE_COMMAND_REG(port_num), (queues << 8));
 
 		/* Wait for all Rx activity to terminate. */
 		/* Check port cause register that all Rx queues are stopped */
-		while (mv_read(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num))
-									& 0xFF)
+		while (mv_read(RECEIVE_QUEUE_COMMAND_REG(port_num)) & 0xFF)
 			udelay(PHY_WAIT_MICRO_SECONDS);
 	}
@@ -2338,11 +2325,11 @@ static void eth_port_reset(unsigned int port_num)
 	eth_clear_mib_counters(port_num);
 
 	/* Reset the Enable bit in the Configuration Register */
-	reg_data = mv_read(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num));
-	reg_data &= ~(MV643XX_ETH_SERIAL_PORT_ENABLE |
-			MV643XX_ETH_DO_NOT_FORCE_LINK_FAIL |
-			MV643XX_ETH_FORCE_LINK_PASS);
-	mv_write(MV643XX_ETH_PORT_SERIAL_CONTROL_REG(port_num), reg_data);
+	reg_data = mv_read(PORT_SERIAL_CONTROL_REG(port_num));
+	reg_data &= ~(SERIAL_PORT_ENABLE |
+			DO_NOT_FORCE_LINK_FAIL |
+			FORCE_LINK_PASS);
+	mv_write(PORT_SERIAL_CONTROL_REG(port_num), reg_data);
 }
@@ -2377,7 +2364,7 @@ static void eth_port_read_smi_reg(unsigned int port_num,
 	spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
 
 	/* wait for the SMI register to become available */
-	for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
+	for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) {
 		if (i == PHY_WAIT_ITERATIONS) {
 			printk("mv643xx PHY busy timeout, port %d\n", port_num);
 			goto out;
@@ -2385,11 +2372,11 @@ static void eth_port_read_smi_reg(unsigned int port_num,
 		udelay(PHY_WAIT_MICRO_SECONDS);
 	}
 
-	mv_write(MV643XX_ETH_SMI_REG,
+	mv_write(SMI_REG,
 		(phy_addr << 16) | (phy_reg << 21) | ETH_SMI_OPCODE_READ);
 
 	/* now wait for the data to be valid */
-	for (i = 0; !(mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_READ_VALID); i++) {
+	for (i = 0; !(mv_read(SMI_REG) & ETH_SMI_READ_VALID); i++) {
 		if (i == PHY_WAIT_ITERATIONS) {
 			printk("mv643xx PHY read timeout, port %d\n", port_num);
 			goto out;
@@ -2397,7 +2384,7 @@ static void eth_port_read_smi_reg(unsigned int port_num,
 		udelay(PHY_WAIT_MICRO_SECONDS);
 	}
 
-	*value = mv_read(MV643XX_ETH_SMI_REG) & 0xffff;
+	*value = mv_read(SMI_REG) & 0xffff;
 out:
 	spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);
 }
@@ -2435,7 +2422,7 @@ static void eth_port_write_smi_reg(unsigned int eth_port_num,
 	spin_lock_irqsave(&mv643xx_eth_phy_lock, flags);
 
 	/* wait for the SMI register to become available */
-	for (i = 0; mv_read(MV643XX_ETH_SMI_REG) & ETH_SMI_BUSY; i++) {
+	for (i = 0; mv_read(SMI_REG) & ETH_SMI_BUSY; i++) {
 		if (i == PHY_WAIT_ITERATIONS) {
 			printk("mv643xx PHY busy timeout, port %d\n",
 								eth_port_num);
@@ -2444,7 +2431,7 @@ static void eth_port_write_smi_reg(unsigned int eth_port_num,
 		udelay(PHY_WAIT_MICRO_SECONDS);
 	}
 
-	mv_write(MV643XX_ETH_SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
+	mv_write(SMI_REG, (phy_addr << 16) | (phy_reg << 21) |
 				ETH_SMI_OPCODE_WRITE | (value & 0xffff));
 out:
 	spin_unlock_irqrestore(&mv643xx_eth_phy_lock, flags);