Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6:
  e100: Use pci_pme_active to clear PME_Status and disable PME#
  e1000: prevent corruption of EEPROM/NVM
  forcedeth: call restore mac addr in nv_shutdown path
  bnx2: Promote vector field in bnx2_irq structure from u16 to unsigned int
  sctp: Fix oops when INIT-ACK indicates that peer doesn't support AUTH
  sctp: do not enable peer features if we can't do them.
  sctp: set the skb->ip_summed correctly when sending over loopback.
  udp: Fix rcv socket locking
commit 764527a1b3
@@ -6597,7 +6597,7 @@ struct flash_spec {
 
 struct bnx2_irq {
 	irq_handler_t handler;
-	u16 vector;
+	unsigned int vector;
 	u8 requested;
 	char name[16];
 };
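The hunk above widens bnx2_irq.vector because MSI-X vector numbers are reported in a 32-bit field (struct msix_entry carries a u32 vector) and can exceed what a u16 holds on large systems. A standalone C demo of the truncation this promotion avoids; the vector value below is made up for illustration and is not from the driver:

/* demo: a 32-bit vector number silently truncates when stored in a u16 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t msix_vector = 70000;      /* hypothetical vector > 0xffff */
	uint16_t narrow = msix_vector;     /* silently truncated to 4464 */
	unsigned int wide = msix_vector;   /* value preserved */

	printf("u16: %u, unsigned int: %u\n", narrow, wide);
	return 0;
}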
@@ -2738,9 +2738,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
 		nic->flags |= wol_magic;
 
 	/* ack any pending wake events, disable PME */
-	err = pci_enable_wake(pdev, 0, 0);
-	if (err)
-		DPRINTK(PROBE, ERR, "Error clearing wake event\n");
+	pci_pme_active(pdev, false);
 
 	strcpy(netdev->name, "eth%d");
 	if((err = register_netdev(netdev))) {
@@ -144,6 +144,8 @@ static s32 e1000_host_if_read_cookie(struct e1000_hw *hw, u8 *buffer);
 static u8 e1000_calculate_mng_checksum(char *buffer, u32 length);
 static s32 e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, u16 duplex);
 static s32 e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
+static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
 
 /* IGP cable length table */
 static const
@@ -168,6 +170,8 @@ u16 e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
       83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
       104, 109, 114, 118, 121, 124};
 
+static DEFINE_SPINLOCK(e1000_eeprom_lock);
+
 /******************************************************************************
  * Set the phy type member in the hw struct.
  *
@@ -4903,6 +4907,15 @@ static s32 e1000_spi_eeprom_ready(struct e1000_hw *hw)
  * words - number of words to read
  *****************************************************************************/
 s32 e1000_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	s32 ret;
+	spin_lock(&e1000_eeprom_lock);
+	ret = e1000_do_read_eeprom(hw, offset, words, data);
+	spin_unlock(&e1000_eeprom_lock);
+	return ret;
+}
+
+static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 {
 	struct e1000_eeprom_info *eeprom = &hw->eeprom;
 	u32 i = 0;
@@ -5235,6 +5248,16 @@ s32 e1000_update_eeprom_checksum(struct e1000_hw *hw)
  * EEPROM will most likely contain an invalid checksum.
  *****************************************************************************/
 s32 e1000_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	s32 ret;
+	spin_lock(&e1000_eeprom_lock);
+	ret = e1000_do_write_eeprom(hw, offset, words, data);
+	spin_unlock(&e1000_eeprom_lock);
+	return ret;
+}
+
+
+static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 {
 	struct e1000_eeprom_info *eeprom = &hw->eeprom;
 	s32 status = 0;
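Both EEPROM entry points above now share one shape: the public function only takes e1000_eeprom_lock and delegates the real work to a __do helper, so concurrent callers can no longer interleave EEPROM accesses and corrupt the NVM. A rough userspace sketch of that serialize-then-delegate pattern, with a pthread mutex standing in for the kernel spinlock; nvm_read/do_nvm_read are illustrative names, not driver functions:

/* sketch: public wrapper serializes, helper does the device access */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t nvm_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_nvm_read(unsigned offset, unsigned short *data)
{
	/* stand-in for the real hardware access */
	*data = (unsigned short)(0xA500 | (offset & 0xff));
	return 0;
}

int nvm_read(unsigned offset, unsigned short *data)
{
	int ret;

	pthread_mutex_lock(&nvm_lock);   /* like spin_lock(&e1000_eeprom_lock) */
	ret = do_nvm_read(offset, data);
	pthread_mutex_unlock(&nvm_lock);
	return ret;
}

int main(void)
{
	unsigned short word;

	nvm_read(3, &word);
	printf("word at 3: 0x%04x\n", word);
	return 0;
}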
@@ -5643,6 +5643,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
 		dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
 		dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
 		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
+		printk(KERN_DEBUG "nv_probe: set workaround bit for reversed mac addr\n");
 	}
 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
@@ -5890,14 +5891,12 @@ static void nv_restore_phy(struct net_device *dev)
 	}
 }
 
-static void __devexit nv_remove(struct pci_dev *pci_dev)
+static void nv_restore_mac_addr(struct pci_dev *pci_dev)
 {
 	struct net_device *dev = pci_get_drvdata(pci_dev);
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
 
-	unregister_netdev(dev);
-
 	/* special op: write back the misordered MAC address - otherwise
 	 * the next nv_probe would see a wrong address.
 	 */
@@ -5905,6 +5904,15 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
 	writel(np->orig_mac[1], base + NvRegMacAddrB);
 	writel(readl(base + NvRegTransmitPoll) & ~NVREG_TRANSMITPOLL_MAC_ADDR_REV,
 	       base + NvRegTransmitPoll);
+}
+
+static void __devexit nv_remove(struct pci_dev *pci_dev)
+{
+	struct net_device *dev = pci_get_drvdata(pci_dev);
+
+	unregister_netdev(dev);
+
+	nv_restore_mac_addr(pci_dev);
 
 	/* restore any phy related changes */
 	nv_restore_phy(dev);
@@ -5975,6 +5983,8 @@ static void nv_shutdown(struct pci_dev *pdev)
 	if (netif_running(dev))
 		nv_close(dev);
 
+	nv_restore_mac_addr(pdev);
+
 	pci_disable_device(pdev);
 	if (system_state == SYSTEM_POWER_OFF) {
 		if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled))
@@ -951,6 +951,27 @@ int udp_disconnect(struct sock *sk, int flags)
 	return 0;
 }
 
+static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+	int is_udplite = IS_UDPLITE(sk);
+	int rc;
+
+	if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
+		/* Note that an ENOMEM error is charged twice */
+		if (rc == -ENOMEM)
+			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+					 is_udplite);
+		goto drop;
+	}
+
+	return 0;
+
+drop:
+	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+	kfree_skb(skb);
+	return -1;
+}
+
 /* returns:
  *  -1: error
  *   0: success
@@ -989,9 +1010,7 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 		    up->encap_rcv != NULL) {
 			int ret;
 
-			bh_unlock_sock(sk);
 			ret = (*up->encap_rcv)(sk, skb);
-			bh_lock_sock(sk);
 			if (ret <= 0) {
 				UDP_INC_STATS_BH(sock_net(sk),
 						 UDP_MIB_INDATAGRAMS,
@@ -1044,17 +1063,16 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
 		goto drop;
 	}
 
-	if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
-		/* Note that an ENOMEM error is charged twice */
-		if (rc == -ENOMEM) {
-			UDP_INC_STATS_BH(sock_net(sk),
-					 UDP_MIB_RCVBUFERRORS, is_udplite);
-			atomic_inc(&sk->sk_drops);
-		}
-		goto drop;
-	}
+	rc = 0;
 
-	return 0;
+	bh_lock_sock(sk);
+	if (!sock_owned_by_user(sk))
+		rc = __udp_queue_rcv_skb(sk, skb);
+	else
+		sk_add_backlog(sk, skb);
+	bh_unlock_sock(sk);
+
+	return rc;
 
 drop:
 	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
@@ -1092,15 +1110,7 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 			skb1 = skb_clone(skb, GFP_ATOMIC);
 
 			if (skb1) {
-				int ret = 0;
-
-				bh_lock_sock(sk);
-				if (!sock_owned_by_user(sk))
-					ret = udp_queue_rcv_skb(sk, skb1);
-				else
-					sk_add_backlog(sk, skb1);
-				bh_unlock_sock(sk);
-
+				int ret = udp_queue_rcv_skb(sk, skb1);
 				if (ret > 0)
 					/* we should probably re-process instead
 					 * of dropping packets here. */
@@ -1195,13 +1205,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
 			uh->dest, inet_iif(skb), udptable);
 
 	if (sk != NULL) {
-		int ret = 0;
-		bh_lock_sock(sk);
-		if (!sock_owned_by_user(sk))
-			ret = udp_queue_rcv_skb(sk, skb);
-		else
-			sk_add_backlog(sk, skb);
-		bh_unlock_sock(sk);
+		int ret = udp_queue_rcv_skb(sk, skb);
 		sock_put(sk);
 
 		/* a return value > 0 means to resubmit the input, but
@@ -1494,7 +1498,7 @@ struct proto udp_prot = {
 	.sendmsg = udp_sendmsg,
 	.recvmsg = udp_recvmsg,
 	.sendpage = udp_sendpage,
-	.backlog_rcv = udp_queue_rcv_skb,
+	.backlog_rcv = __udp_queue_rcv_skb,
 	.hash = udp_lib_hash,
 	.unhash = udp_lib_unhash,
 	.get_port = udp_v4_get_port,
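Taken together, the udp hunks move the socket-locking policy into udp_queue_rcv_skb() itself: it takes bh_lock_sock(), delivers through __udp_queue_rcv_skb() when the socket is not owned by user context, and defers to the backlog otherwise, while .backlog_rcv points at the lock-free worker because the socket lock is already held when the backlog is drained. A rough userspace analogy of that split; all names, the int "packet" payload, and the pthread mutex are illustrative stand-ins, not kernel API:

/* sketch: locking wrapper vs. lock-free worker reused as the backlog handler */
#include <pthread.h>
#include <stdio.h>

#define BACKLOG_MAX 16

static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;
static int owned_by_user;              /* user context currently holds the socket */
static int backlog[BACKLOG_MAX];
static int backlog_len;

static int __queue_rcv(int pkt)        /* caller must hold sock_lock */
{
	printf("delivered packet %d\n", pkt);
	return 0;
}

int queue_rcv(int pkt)                 /* receive-path entry point */
{
	int rc = 0;

	pthread_mutex_lock(&sock_lock);
	if (!owned_by_user)
		rc = __queue_rcv(pkt);             /* deliver now */
	else if (backlog_len < BACKLOG_MAX)
		backlog[backlog_len++] = pkt;      /* like sk_add_backlog() */
	pthread_mutex_unlock(&sock_lock);
	return rc;
}

void release_sock_like(void)           /* user context releases the socket */
{
	pthread_mutex_lock(&sock_lock);
	for (int i = 0; i < backlog_len; i++)
		__queue_rcv(backlog[i]);           /* like .backlog_rcv */
	backlog_len = 0;
	owned_by_user = 0;
	pthread_mutex_unlock(&sock_lock);
}

int main(void)
{
	queue_rcv(1);                      /* delivered immediately */
	owned_by_user = 1;
	queue_rcv(2);                      /* parked on the backlog */
	release_sock_like();               /* delivered from the backlog */
	return 0;
}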
@@ -599,11 +599,12 @@ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc,
 	/* Check to see if this is a duplicate. */
 	peer = sctp_assoc_lookup_paddr(asoc, addr);
 	if (peer) {
+		/* An UNKNOWN state is only set on transports added by
+		 * user in sctp_connectx() call. Such transports should be
+		 * considered CONFIRMED per RFC 4960, Section 5.4.
+		 */
 		if (peer->state == SCTP_UNKNOWN) {
-			if (peer_state == SCTP_ACTIVE)
-				peer->state = SCTP_ACTIVE;
-			if (peer_state == SCTP_UNCONFIRMED)
-				peer->state = SCTP_UNCONFIRMED;
+			peer->state = SCTP_ACTIVE;
 		}
 		return peer;
 	}
@@ -533,7 +533,8 @@ int sctp_packet_transmit(struct sctp_packet *packet)
 	if (!(dst->dev->features & NETIF_F_NO_CSUM)) {
 		crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);
 		crc32 = sctp_end_cksum(crc32);
-	}
+	} else
+		nskb->ip_summed = CHECKSUM_UNNECESSARY;
 
 	/* 3) Put the resultant value into the checksum field in the
 	 *    common header, and leave the rest of the bits unchanged.
@@ -1886,11 +1886,13 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
 			    /* if the peer reports AUTH, assume that he
 			     * supports AUTH.
 			     */
-			    asoc->peer.auth_capable = 1;
+			    if (sctp_auth_enable)
+				    asoc->peer.auth_capable = 1;
 			    break;
 		    case SCTP_CID_ASCONF:
 		    case SCTP_CID_ASCONF_ACK:
-			    asoc->peer.asconf_capable = 1;
+			    if (sctp_addip_enable)
+				    asoc->peer.asconf_capable = 1;
 			    break;
 		    default:
 			    break;
@@ -2319,12 +2321,10 @@ int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
 	/* Release the transport structures. */
 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
 		transport = list_entry(pos, struct sctp_transport, transports);
-		list_del_init(pos);
-		sctp_transport_free(transport);
+		if (transport->state != SCTP_ACTIVE)
+			sctp_assoc_rm_peer(asoc, transport);
 	}
 
-	asoc->peer.transport_count = 0;
-
 nomem:
 	return 0;
 }
@@ -2460,6 +2460,9 @@ static int sctp_process_param(struct sctp_association *asoc,
 		break;
 
 	case SCTP_PARAM_SET_PRIMARY:
+		if (!sctp_addip_enable)
+			goto fall_through;
+
 		addr_param = param.v + sizeof(sctp_addip_param_t);
 
 		af = sctp_get_af_specific(param_type2af(param.p->type));