diff --git a/[refs] b/[refs] index 3a4f0dba2bc4..790fd89fc4a1 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: dfbce08c19cba2ba4faaf8c0dd6d7678f46c78dd +refs/heads/master: 5a05fae5ca7cd5279567747fc34d60413b504cd6 diff --git a/trunk/Documentation/networking/can.txt b/trunk/Documentation/networking/can.txt index 820f55344edc..a06741898f29 100644 --- a/trunk/Documentation/networking/can.txt +++ b/trunk/Documentation/networking/can.txt @@ -22,8 +22,7 @@ This file contains 4.1.2 RAW socket option CAN_RAW_ERR_FILTER 4.1.3 RAW socket option CAN_RAW_LOOPBACK 4.1.4 RAW socket option CAN_RAW_RECV_OWN_MSGS - 4.1.5 RAW socket option CAN_RAW_FD_FRAMES - 4.1.6 RAW socket returned message flags + 4.1.5 RAW socket returned message flags 4.2 Broadcast Manager protocol sockets (SOCK_DGRAM) 4.3 connected transport protocols (SOCK_SEQPACKET) 4.4 unconnected transport protocols (SOCK_DGRAM) @@ -42,8 +41,7 @@ This file contains 6.5.1 Netlink interface to set/get devices properties 6.5.2 Setting the CAN bit-timing 6.5.3 Starting and stopping the CAN network device - 6.6 CAN FD (flexible data rate) driver support - 6.7 supported CAN hardware + 6.6 supported CAN hardware 7 Socket CAN resources @@ -275,7 +273,7 @@ solution for a couple of reasons: struct can_frame { canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */ - __u8 can_dlc; /* frame payload length in byte (0 .. 8) */ + __u8 can_dlc; /* data length code: 0 .. 8 */ __u8 data[8] __attribute__((aligned(8))); }; @@ -377,51 +375,6 @@ solution for a couple of reasons: nbytes = sendto(s, &frame, sizeof(struct can_frame), 0, (struct sockaddr*)&addr, sizeof(addr)); - Remark about CAN FD (flexible data rate) support: - - Generally the handling of CAN FD is very similar to the formerly described - examples. The new CAN FD capable CAN controllers support two different - bitrates for the arbitration phase and the payload phase of the CAN FD frame - and up to 64 bytes of payload. This extended payload length breaks all the - kernel interfaces (ABI) which heavily rely on the CAN frame with fixed eight - bytes of payload (struct can_frame) like the CAN_RAW socket. Therefore e.g. - the CAN_RAW socket supports a new socket option CAN_RAW_FD_FRAMES that - switches the socket into a mode that allows the handling of CAN FD frames - and (legacy) CAN frames simultaneously (see section 4.1.5). - - The struct canfd_frame is defined in include/linux/can.h: - - struct canfd_frame { - canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */ - __u8 len; /* frame payload length in byte (0 .. 64) */ - __u8 flags; /* additional flags for CAN FD */ - __u8 __res0; /* reserved / padding */ - __u8 __res1; /* reserved / padding */ - __u8 data[64] __attribute__((aligned(8))); - }; - - The struct canfd_frame and the existing struct can_frame have the can_id, - the payload length and the payload data at the same offset inside their - structures. This allows to handle the different structures very similar. - When the content of a struct can_frame is copied into a struct canfd_frame - all structure elements can be used as-is - only the data[] becomes extended. - - When introducing the struct canfd_frame it turned out that the data length - code (DLC) of the struct can_frame was used as a length information as the - length and the DLC has a 1:1 mapping in the range of 0 .. 8. To preserve - the easy handling of the length information the canfd_frame.len element - contains a plain length value from 0 .. 64. 
So both canfd_frame.len and - can_frame.can_dlc are equal and contain a length information and no DLC. - For details about the distinction of CAN and CAN FD capable devices and - the mapping to the bus-relevant data length code (DLC), see chapter 6.6. - - The length of the two CAN(FD) frame structures define the maximum transfer - unit (MTU) of the CAN(FD) network interface and skbuff data length. Two - definitions are specified for CAN specific MTUs in include/linux/can.h : - - #define CAN_MTU (sizeof(struct can_frame)) == 16 => 'legacy' CAN frame - #define CANFD_MTU (sizeof(struct canfd_frame)) == 72 => CAN FD frame - 4.1 RAW protocol sockets with can_filters (SOCK_RAW) Using CAN_RAW sockets is extensively comparable to the commonly @@ -519,69 +472,7 @@ solution for a couple of reasons: setsockopt(s, SOL_CAN_RAW, CAN_RAW_RECV_OWN_MSGS, &recv_own_msgs, sizeof(recv_own_msgs)); - 4.1.5 RAW socket option CAN_RAW_FD_FRAMES - - CAN FD support in CAN_RAW sockets can be enabled with a new socket option - CAN_RAW_FD_FRAMES which is off by default. When the new socket option is - not supported by the CAN_RAW socket (e.g. on older kernels), switching the - CAN_RAW_FD_FRAMES option returns the error -ENOPROTOOPT. - - Once CAN_RAW_FD_FRAMES is enabled the application can send both CAN frames - and CAN FD frames. OTOH the application has to handle CAN and CAN FD frames - when reading from the socket. - - CAN_RAW_FD_FRAMES enabled: CAN_MTU and CANFD_MTU are allowed - CAN_RAW_FD_FRAMES disabled: only CAN_MTU is allowed (default) - - Example: - [ remember: CANFD_MTU == sizeof(struct canfd_frame) ] - - struct canfd_frame cfd; - - nbytes = read(s, &cfd, CANFD_MTU); - - if (nbytes == CANFD_MTU) { - printf("got CAN FD frame with length %d\n", cfd.len); - /* cfd.flags contains valid data */ - } else if (nbytes == CAN_MTU) { - printf("got legacy CAN frame with length %d\n", cfd.len); - /* cfd.flags is undefined */ - } else { - fprintf(stderr, "read: invalid CAN(FD) frame\n"); - return 1; - } - - /* the content can be handled independently from the received MTU size */ - - printf("can_id: %X data length: %d data: ", cfd.can_id, cfd.len); - for (i = 0; i < cfd.len; i++) - printf("%02X ", cfd.data[i]); - - When reading with size CANFD_MTU only returns CAN_MTU bytes that have - been received from the socket a legacy CAN frame has been read into the - provided CAN FD structure. Note that the canfd_frame.flags data field is - not specified in the struct can_frame and therefore it is only valid in - CANFD_MTU sized CAN FD frames. - - As long as the payload length is <=8 the received CAN frames from CAN FD - capable CAN devices can be received and read by legacy sockets too. When - user-generated CAN FD frames have a payload length <=8 these can be send - by legacy CAN network interfaces too. Sending CAN FD frames with payload - length > 8 to a legacy CAN network interface returns an -EMSGSIZE error. - - Implementation hint for new CAN applications: - - To build a CAN FD aware application use struct canfd_frame as basic CAN - data structure for CAN_RAW based applications. When the application is - executed on an older Linux kernel and switching the CAN_RAW_FD_FRAMES - socket option returns an error: No problem. You'll get legacy CAN frames - or CAN FD frames and can process them the same way. - - When sending to CAN devices make sure that the device is capable to handle - CAN FD frames by checking if the device maximum transfer unit is CANFD_MTU. - The CAN device MTU can be retrieved e.g. 
with a SIOCGIFMTU ioctl() syscall. - - 4.1.6 RAW socket returned message flags + 4.1.5 RAW socket returned message flags When using recvmsg() call, the msg->msg_flags may contain following flags: @@ -682,13 +573,10 @@ solution for a couple of reasons: dev->type = ARPHRD_CAN; /* the netdevice hardware type */ dev->flags = IFF_NOARP; /* CAN has no arp */ - dev->mtu = CAN_MTU; /* sizeof(struct can_frame) -> legacy CAN interface */ + dev->mtu = sizeof(struct can_frame); - or alternative, when the controller supports CAN with flexible data rate: - dev->mtu = CANFD_MTU; /* sizeof(struct canfd_frame) -> CAN FD interface */ - - The struct can_frame or struct canfd_frame is the payload of each socket - buffer (skbuff) in the protocol family PF_CAN. + The struct can_frame is the payload of each socket buffer in the + protocol family PF_CAN. 6.2 local loopback of sent frames @@ -904,33 +792,7 @@ solution for a couple of reasons: Note that a restart will also create a CAN error message frame (see also chapter 3.4). - 6.6 CAN FD (flexible data rate) driver support - - CAN FD capable CAN controllers support two different bitrates for the - arbitration phase and the payload phase of the CAN FD frame. Therefore a - second bittiming has to be specified in order to enable the CAN FD bitrate. - - Additionally CAN FD capable CAN controllers support up to 64 bytes of - payload. The representation of this length in can_frame.can_dlc and - canfd_frame.len for userspace applications and inside the Linux network - layer is a plain value from 0 .. 64 instead of the CAN 'data length code'. - The data length code was a 1:1 mapping to the payload length in the legacy - CAN frames anyway. The payload length to the bus-relevant DLC mapping is - only performed inside the CAN drivers, preferably with the helper - functions can_dlc2len() and can_len2dlc(). - - The CAN netdevice driver capabilities can be distinguished by the network - devices maximum transfer unit (MTU): - - MTU = 16 (CAN_MTU) => sizeof(struct can_frame) => 'legacy' CAN device - MTU = 72 (CANFD_MTU) => sizeof(struct canfd_frame) => CAN FD capable device - - The CAN device MTU can be retrieved e.g. with a SIOCGIFMTU ioctl() syscall. - N.B. CAN FD capable devices can also handle and send legacy CAN frames. - - FIXME: Add details about the CAN FD controller configuration when available. - - 6.7 Supported CAN hardware + 6.6 Supported CAN hardware Please check the "Kconfig" file in "drivers/net/can" to get an actual list of the support CAN hardware. 
On the Socket CAN project website diff --git a/trunk/drivers/bluetooth/bluecard_cs.c b/trunk/drivers/bluetooth/bluecard_cs.c index 585c88e01893..1fcd92380356 100644 --- a/trunk/drivers/bluetooth/bluecard_cs.c +++ b/trunk/drivers/bluetooth/bluecard_cs.c @@ -231,12 +231,12 @@ static void bluecard_write_wakeup(bluecard_info_t *info) } do { - unsigned int iobase = info->p_dev->resource[0]->start; - unsigned int offset; - unsigned char command; - unsigned long ready_bit; + register unsigned int iobase = info->p_dev->resource[0]->start; + register unsigned int offset; + register unsigned char command; + register unsigned long ready_bit; register struct sk_buff *skb; - int len; + register int len; clear_bit(XMIT_WAKEUP, &(info->tx_state)); diff --git a/trunk/drivers/bluetooth/bpa10x.c b/trunk/drivers/bluetooth/bpa10x.c index 29caaed2d715..609861a53c28 100644 --- a/trunk/drivers/bluetooth/bpa10x.c +++ b/trunk/drivers/bluetooth/bpa10x.c @@ -470,7 +470,7 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id * hdev->flush = bpa10x_flush; hdev->send = bpa10x_send_frame; - set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); + set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); err = hci_register_dev(hdev); if (err < 0) { diff --git a/trunk/drivers/bluetooth/bt3c_cs.c b/trunk/drivers/bluetooth/bt3c_cs.c index b2b0fbbb43b5..308c8599ab55 100644 --- a/trunk/drivers/bluetooth/bt3c_cs.c +++ b/trunk/drivers/bluetooth/bt3c_cs.c @@ -186,9 +186,9 @@ static void bt3c_write_wakeup(bt3c_info_t *info) return; do { - unsigned int iobase = info->p_dev->resource[0]->start; + register unsigned int iobase = info->p_dev->resource[0]->start; register struct sk_buff *skb; - int len; + register int len; if (!pcmcia_dev_present(info->p_dev)) break; diff --git a/trunk/drivers/bluetooth/btmrvl_sdio.c b/trunk/drivers/bluetooth/btmrvl_sdio.c index 2867499f7256..a853244e7fd7 100644 --- a/trunk/drivers/bluetooth/btmrvl_sdio.c +++ b/trunk/drivers/bluetooth/btmrvl_sdio.c @@ -110,9 +110,6 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = { /* Marvell SD8787 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A), .driver_data = (unsigned long) &btmrvl_sdio_sd8787 }, - /* Marvell SD8787 Bluetooth AMP device */ - { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911B), - .driver_data = (unsigned long) &btmrvl_sdio_sd8787 }, /* Marvell SD8797 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A), .driver_data = (unsigned long) &btmrvl_sdio_sd8797 }, diff --git a/trunk/drivers/bluetooth/btuart_cs.c b/trunk/drivers/bluetooth/btuart_cs.c index 65b8d996840c..c4fc2f3fc32c 100644 --- a/trunk/drivers/bluetooth/btuart_cs.c +++ b/trunk/drivers/bluetooth/btuart_cs.c @@ -140,9 +140,9 @@ static void btuart_write_wakeup(btuart_info_t *info) } do { - unsigned int iobase = info->p_dev->resource[0]->start; + register unsigned int iobase = info->p_dev->resource[0]->start; register struct sk_buff *skb; - int len; + register int len; clear_bit(XMIT_WAKEUP, &(info->tx_state)); diff --git a/trunk/drivers/bluetooth/btusb.c b/trunk/drivers/bluetooth/btusb.c index a45e717f5f84..c9463af8e564 100644 --- a/trunk/drivers/bluetooth/btusb.c +++ b/trunk/drivers/bluetooth/btusb.c @@ -21,7 +21,15 @@ * */ +#include #include +#include +#include +#include +#include +#include +#include + #include #include @@ -1018,7 +1026,7 @@ static int btusb_probe(struct usb_interface *intf, data->isoc = usb_ifnum_to_if(data->udev, 1); if (!reset) - set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); + set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); if 
(force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) { if (!disable_scofix) @@ -1030,7 +1038,7 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_DIGIANSWER) { data->cmdreq_type = USB_TYPE_VENDOR; - set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); + set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); } if (id->driver_info & BTUSB_CSR) { @@ -1038,7 +1046,7 @@ static int btusb_probe(struct usb_interface *intf, /* Old firmware would otherwise execute USB reset */ if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117) - set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); + set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); } if (id->driver_info & BTUSB_SNIFFER) { diff --git a/trunk/drivers/bluetooth/dtl1_cs.c b/trunk/drivers/bluetooth/dtl1_cs.c index b1b37ccd3cd4..6e8d96189684 100644 --- a/trunk/drivers/bluetooth/dtl1_cs.c +++ b/trunk/drivers/bluetooth/dtl1_cs.c @@ -144,9 +144,9 @@ static void dtl1_write_wakeup(dtl1_info_t *info) } do { - unsigned int iobase = info->p_dev->resource[0]->start; + register unsigned int iobase = info->p_dev->resource[0]->start; register struct sk_buff *skb; - int len; + register int len; clear_bit(XMIT_WAKEUP, &(info->tx_state)); diff --git a/trunk/drivers/bluetooth/hci_bcsp.c b/trunk/drivers/bluetooth/hci_bcsp.c index 57e502e06080..661a8dc4d2f8 100644 --- a/trunk/drivers/bluetooth/hci_bcsp.c +++ b/trunk/drivers/bluetooth/hci_bcsp.c @@ -552,7 +552,7 @@ static u16 bscp_get_crc(struct bcsp_struct *bcsp) static int bcsp_recv(struct hci_uart *hu, void *data, int count) { struct bcsp_struct *bcsp = hu->priv; - unsigned char *ptr; + register unsigned char *ptr; BT_DBG("hu %p count %d rx_state %d rx_count %ld", hu, count, bcsp->rx_state, bcsp->rx_count); diff --git a/trunk/drivers/bluetooth/hci_h4.c b/trunk/drivers/bluetooth/hci_h4.c index c60623f206d4..748329468d26 100644 --- a/trunk/drivers/bluetooth/hci_h4.c +++ b/trunk/drivers/bluetooth/hci_h4.c @@ -126,7 +126,7 @@ static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb) static inline int h4_check_data_len(struct h4_struct *h4, int len) { - int room = skb_tailroom(h4->rx_skb); + register int room = skb_tailroom(h4->rx_skb); BT_DBG("len %d room %d", len, room); diff --git a/trunk/drivers/bluetooth/hci_ldisc.c b/trunk/drivers/bluetooth/hci_ldisc.c index 2f9b796e106e..e564579a6115 100644 --- a/trunk/drivers/bluetooth/hci_ldisc.c +++ b/trunk/drivers/bluetooth/hci_ldisc.c @@ -394,7 +394,7 @@ static int hci_uart_register_dev(struct hci_uart *hu) set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags)) - set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); + set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags)) hdev->dev_type = HCI_AMP; diff --git a/trunk/drivers/bluetooth/hci_ll.c b/trunk/drivers/bluetooth/hci_ll.c index ff6d589c34a5..b874c0efde24 100644 --- a/trunk/drivers/bluetooth/hci_ll.c +++ b/trunk/drivers/bluetooth/hci_ll.c @@ -348,7 +348,7 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb) static inline int ll_check_data_len(struct ll_struct *ll, int len) { - int room = skb_tailroom(ll->rx_skb); + register int room = skb_tailroom(ll->rx_skb); BT_DBG("len %d room %d", len, room); @@ -374,11 +374,11 @@ static inline int ll_check_data_len(struct ll_struct *ll, int len) static int ll_recv(struct hci_uart *hu, void *data, int count) { struct ll_struct *ll = hu->priv; - char *ptr; + register char *ptr; struct hci_event_hdr *eh; struct hci_acl_hdr *ah; struct hci_sco_hdr *sh; - int len, type, dlen; + 
register int len, type, dlen; BT_DBG("hu %p count %d rx_state %ld rx_count %ld", hu, count, ll->rx_state, ll->rx_count); diff --git a/trunk/drivers/net/can/c_can/Kconfig b/trunk/drivers/net/can/c_can/Kconfig index 3b83bafcd947..25d371cf98dd 100644 --- a/trunk/drivers/net/can/c_can/Kconfig +++ b/trunk/drivers/net/can/c_can/Kconfig @@ -13,11 +13,4 @@ config CAN_C_CAN_PLATFORM boards from ST Microelectronics (http://www.st.com) like the SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com) boards like am335x, dm814x, dm813x and dm811x. - -config CAN_C_CAN_PCI - tristate "Generic PCI Bus based C_CAN/D_CAN driver" - depends on PCI - ---help--- - This driver adds support for the C_CAN/D_CAN chips connected - to the PCI bus. endif diff --git a/trunk/drivers/net/can/c_can/Makefile b/trunk/drivers/net/can/c_can/Makefile index ad1cc842170a..9273f6d5c4b7 100644 --- a/trunk/drivers/net/can/c_can/Makefile +++ b/trunk/drivers/net/can/c_can/Makefile @@ -4,6 +4,5 @@ obj-$(CONFIG_CAN_C_CAN) += c_can.o obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o -obj-$(CONFIG_CAN_C_CAN_PCI) += c_can_pci.o ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG diff --git a/trunk/drivers/net/can/c_can/c_can_pci.c b/trunk/drivers/net/can/c_can/c_can_pci.c deleted file mode 100644 index 1011146ea513..000000000000 --- a/trunk/drivers/net/can/c_can/c_can_pci.c +++ /dev/null @@ -1,221 +0,0 @@ -/* - * PCI bus driver for Bosch C_CAN/D_CAN controller - * - * Copyright (C) 2012 Federico Vaga - * - * Borrowed from c_can_platform.c - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ - -#include -#include -#include -#include - -#include - -#include "c_can.h" - -enum c_can_pci_reg_align { - C_CAN_REG_ALIGN_16, - C_CAN_REG_ALIGN_32, -}; - -struct c_can_pci_data { - /* Specify if is C_CAN or D_CAN */ - enum c_can_dev_id type; - /* Set the register alignment in the memory */ - enum c_can_pci_reg_align reg_align; - /* Set the frequency */ - unsigned int freq; -}; - -/* - * 16-bit c_can registers can be arranged differently in the memory - * architecture of different implementations. For example: 16-bit - * registers can be aligned to a 16-bit boundary or 32-bit boundary etc. - * Handle the same by providing a common read/write interface. 
- */ -static u16 c_can_pci_read_reg_aligned_to_16bit(struct c_can_priv *priv, - enum reg index) -{ - return readw(priv->base + priv->regs[index]); -} - -static void c_can_pci_write_reg_aligned_to_16bit(struct c_can_priv *priv, - enum reg index, u16 val) -{ - writew(val, priv->base + priv->regs[index]); -} - -static u16 c_can_pci_read_reg_aligned_to_32bit(struct c_can_priv *priv, - enum reg index) -{ - return readw(priv->base + 2 * priv->regs[index]); -} - -static void c_can_pci_write_reg_aligned_to_32bit(struct c_can_priv *priv, - enum reg index, u16 val) -{ - writew(val, priv->base + 2 * priv->regs[index]); -} - -static int __devinit c_can_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct c_can_pci_data *c_can_pci_data = (void *)ent->driver_data; - struct c_can_priv *priv; - struct net_device *dev; - void __iomem *addr; - int ret; - - ret = pci_enable_device(pdev); - if (ret) { - dev_err(&pdev->dev, "pci_enable_device FAILED\n"); - goto out; - } - - ret = pci_request_regions(pdev, KBUILD_MODNAME); - if (ret) { - dev_err(&pdev->dev, "pci_request_regions FAILED\n"); - goto out_disable_device; - } - - pci_set_master(pdev); - pci_enable_msi(pdev); - - addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0)); - if (!addr) { - dev_err(&pdev->dev, - "device has no PCI memory resources, " - "failing adapter\n"); - ret = -ENOMEM; - goto out_release_regions; - } - - /* allocate the c_can device */ - dev = alloc_c_can_dev(); - if (!dev) { - ret = -ENOMEM; - goto out_iounmap; - } - - priv = netdev_priv(dev); - pci_set_drvdata(pdev, dev); - SET_NETDEV_DEV(dev, &pdev->dev); - - dev->irq = pdev->irq; - priv->base = addr; - - if (!c_can_pci_data->freq) { - dev_err(&pdev->dev, "no clock frequency defined\n"); - ret = -ENODEV; - goto out_free_c_can; - } else { - priv->can.clock.freq = c_can_pci_data->freq; - } - - /* Configure CAN type */ - switch (c_can_pci_data->type) { - case C_CAN_DEVTYPE: - priv->regs = reg_map_c_can; - break; - case D_CAN_DEVTYPE: - priv->regs = reg_map_d_can; - priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; - break; - default: - ret = -EINVAL; - goto out_free_c_can; - } - - /* Configure access to registers */ - switch (c_can_pci_data->reg_align) { - case C_CAN_REG_ALIGN_32: - priv->read_reg = c_can_pci_read_reg_aligned_to_32bit; - priv->write_reg = c_can_pci_write_reg_aligned_to_32bit; - break; - case C_CAN_REG_ALIGN_16: - priv->read_reg = c_can_pci_read_reg_aligned_to_16bit; - priv->write_reg = c_can_pci_write_reg_aligned_to_16bit; - break; - default: - ret = -EINVAL; - goto out_free_c_can; - } - - ret = register_c_can_dev(dev); - if (ret) { - dev_err(&pdev->dev, "registering %s failed (err=%d)\n", - KBUILD_MODNAME, ret); - goto out_free_c_can; - } - - dev_dbg(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n", - KBUILD_MODNAME, priv->regs, dev->irq); - - return 0; - -out_free_c_can: - pci_set_drvdata(pdev, NULL); - free_c_can_dev(dev); -out_iounmap: - pci_iounmap(pdev, addr); -out_release_regions: - pci_disable_msi(pdev); - pci_clear_master(pdev); - pci_release_regions(pdev); -out_disable_device: - pci_disable_device(pdev); -out: - return ret; -} - -static void __devexit c_can_pci_remove(struct pci_dev *pdev) -{ - struct net_device *dev = pci_get_drvdata(pdev); - struct c_can_priv *priv = netdev_priv(dev); - - unregister_c_can_dev(dev); - - pci_set_drvdata(pdev, NULL); - free_c_can_dev(dev); - - pci_iounmap(pdev, priv->base); - pci_disable_msi(pdev); - pci_clear_master(pdev); - pci_release_regions(pdev); - pci_disable_device(pdev); -} - 
-static struct c_can_pci_data c_can_sta2x11= { - .type = C_CAN_DEVTYPE, - .reg_align = C_CAN_REG_ALIGN_32, - .freq = 52000000, /* 52 Mhz */ -}; - -#define C_CAN_ID(_vend, _dev, _driverdata) { \ - PCI_DEVICE(_vend, _dev), \ - .driver_data = (unsigned long)&_driverdata, \ -} -static DEFINE_PCI_DEVICE_TABLE(c_can_pci_tbl) = { - C_CAN_ID(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_CAN, - c_can_sta2x11), - {}, -}; -static struct pci_driver c_can_pci_driver = { - .name = KBUILD_MODNAME, - .id_table = c_can_pci_tbl, - .probe = c_can_pci_probe, - .remove = __devexit_p(c_can_pci_remove), -}; - -module_pci_driver(c_can_pci_driver); - -MODULE_AUTHOR("Federico Vaga "); -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("PCI CAN bus driver for Bosch C_CAN/D_CAN controller"); -MODULE_DEVICE_TABLE(pci, c_can_pci_tbl); diff --git a/trunk/drivers/net/can/dev.c b/trunk/drivers/net/can/dev.c index 239e4dd92ca1..f03d7a481a80 100644 --- a/trunk/drivers/net/can/dev.c +++ b/trunk/drivers/net/can/dev.c @@ -33,39 +33,6 @@ MODULE_DESCRIPTION(MOD_DESC); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Wolfgang Grandegger "); -/* CAN DLC to real data length conversion helpers */ - -static const u8 dlc2len[] = {0, 1, 2, 3, 4, 5, 6, 7, - 8, 12, 16, 20, 24, 32, 48, 64}; - -/* get data length from can_dlc with sanitized can_dlc */ -u8 can_dlc2len(u8 can_dlc) -{ - return dlc2len[can_dlc & 0x0F]; -} -EXPORT_SYMBOL_GPL(can_dlc2len); - -static const u8 len2dlc[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, /* 0 - 8 */ - 9, 9, 9, 9, /* 9 - 12 */ - 10, 10, 10, 10, /* 13 - 16 */ - 11, 11, 11, 11, /* 17 - 20 */ - 12, 12, 12, 12, /* 21 - 24 */ - 13, 13, 13, 13, 13, 13, 13, 13, /* 25 - 32 */ - 14, 14, 14, 14, 14, 14, 14, 14, /* 33 - 40 */ - 14, 14, 14, 14, 14, 14, 14, 14, /* 41 - 48 */ - 15, 15, 15, 15, 15, 15, 15, 15, /* 49 - 56 */ - 15, 15, 15, 15, 15, 15, 15, 15}; /* 57 - 64 */ - -/* map the sanitized data length to an appropriate data length code */ -u8 can_len2dlc(u8 len) -{ - if (unlikely(len > 64)) - return 0xF; - - return len2dlc[len]; -} -EXPORT_SYMBOL_GPL(can_len2dlc); - #ifdef CONFIG_CAN_CALC_BITTIMING #define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */ @@ -487,7 +454,7 @@ EXPORT_SYMBOL_GPL(can_bus_off); static void can_setup(struct net_device *dev) { dev->type = ARPHRD_CAN; - dev->mtu = CAN_MTU; + dev->mtu = sizeof(struct can_frame); dev->hard_header_len = 0; dev->addr_len = 0; dev->tx_queue_len = 10; diff --git a/trunk/drivers/net/can/vcan.c b/trunk/drivers/net/can/vcan.c index 4f93c0be0053..ea2d94285936 100644 --- a/trunk/drivers/net/can/vcan.c +++ b/trunk/drivers/net/can/vcan.c @@ -70,12 +70,13 @@ MODULE_PARM_DESC(echo, "Echo sent frames (for testing). 
Default: 0 (Off)"); static void vcan_rx(struct sk_buff *skb, struct net_device *dev) { - struct canfd_frame *cfd = (struct canfd_frame *)skb->data; + struct can_frame *cf = (struct can_frame *)skb->data; struct net_device_stats *stats = &dev->stats; stats->rx_packets++; - stats->rx_bytes += cfd->len; + stats->rx_bytes += cf->can_dlc; + skb->protocol = htons(ETH_P_CAN); skb->pkt_type = PACKET_BROADCAST; skb->dev = dev; skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -85,7 +86,7 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev) static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev) { - struct canfd_frame *cfd = (struct canfd_frame *)skb->data; + struct can_frame *cf = (struct can_frame *)skb->data; struct net_device_stats *stats = &dev->stats; int loop; @@ -93,7 +94,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; stats->tx_packets++; - stats->tx_bytes += cfd->len; + stats->tx_bytes += cf->can_dlc; /* set flag whether this packet has to be looped back */ loop = skb->pkt_type == PACKET_LOOPBACK; @@ -107,7 +108,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev) * CAN core already did the echo for us */ stats->rx_packets++; - stats->rx_bytes += cfd->len; + stats->rx_bytes += cf->can_dlc; } kfree_skb(skb); return NETDEV_TX_OK; @@ -132,28 +133,14 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -static int vcan_change_mtu(struct net_device *dev, int new_mtu) -{ - /* Do not allow changing the MTU while running */ - if (dev->flags & IFF_UP) - return -EBUSY; - - if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU) - return -EINVAL; - - dev->mtu = new_mtu; - return 0; -} - static const struct net_device_ops vcan_netdev_ops = { .ndo_start_xmit = vcan_tx, - .ndo_change_mtu = vcan_change_mtu, }; static void vcan_setup(struct net_device *dev) { dev->type = ARPHRD_CAN; - dev->mtu = CAN_MTU; + dev->mtu = sizeof(struct can_frame); dev->hard_header_len = 0; dev->addr_len = 0; dev->tx_queue_len = 0; diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 7211cb07426e..7de824184979 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -23,8 +23,8 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.72.51-0" -#define DRV_MODULE_RELDATE "2012/06/18" +#define DRV_MODULE_VERSION "1.72.50-0" +#define DRV_MODULE_RELDATE "2012/04/23" #define BNX2X_BC_VER 0x040200 #if defined(CONFIG_DCB) @@ -248,12 +248,13 @@ enum { BNX2X_MAX_CNIC_ETH_CL_ID_IDX, }; -#define BNX2X_CNIC_START_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) *\ - (bp)->max_cos) +#define BNX2X_CNIC_START_ETH_CID 48 +enum { /* iSCSI L2 */ -#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp)) + BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID, /* FCoE L2 */ -#define BNX2X_FCOE_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp) + 1) + BNX2X_FCOE_ETH_CID, +}; /** Additional rings budgeting */ #ifdef BCM_CNIC @@ -275,30 +276,29 @@ enum { #define FIRST_TX_ONLY_COS_INDEX 1 #define FIRST_TX_COS_INDEX 0 +/* defines for decodeing the fastpath index and the cos index out of the + * transmission queue index + */ +#define MAX_TXQS_PER_COS FP_SB_MAX_E1x + +#define TXQ_TO_FP(txq_index) ((txq_index) % MAX_TXQS_PER_COS) +#define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS) + /* rules for calculating the cids of tx-only connections */ 
-#define CID_TO_FP(cid, bp) ((cid) % BNX2X_NUM_NON_CNIC_QUEUES(bp)) -#define CID_COS_TO_TX_ONLY_CID(cid, cos, bp) \ - (cid + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp)) +#define CID_TO_FP(cid) ((cid) % MAX_TXQS_PER_COS) +#define CID_COS_TO_TX_ONLY_CID(cid, cos) (cid + cos * MAX_TXQS_PER_COS) /* fp index inside class of service range */ -#define FP_COS_TO_TXQ(fp, cos, bp) \ - ((fp)->index + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp)) - -/* Indexes for transmission queues array: - * txdata for RSS i CoS j is at location i + (j * num of RSS) - * txdata for FCoE (if exist) is at location max cos * num of RSS - * txdata for FWD (if exist) is one location after FCoE - * txdata for OOO (if exist) is one location after FWD +#define FP_COS_TO_TXQ(fp, cos) ((fp)->index + cos * MAX_TXQS_PER_COS) + +/* + * 0..15 eth cos0 + * 16..31 eth cos1 if applicable + * 32..47 eth cos2 If applicable + * fcoe queue follows eth queues (16, 32, 48 depending on cos) */ -enum { - FCOE_TXQ_IDX_OFFSET, - FWD_TXQ_IDX_OFFSET, - OOO_TXQ_IDX_OFFSET, -}; -#define MAX_ETH_TXQ_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos) -#ifdef BCM_CNIC -#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET) -#endif +#define MAX_ETH_TXQ_IDX(bp) (MAX_TXQS_PER_COS * (bp)->max_cos) +#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp)) /* fast path */ /* @@ -481,8 +481,6 @@ struct bnx2x_fp_txdata { __le16 *tx_cons_sb; int txq_index; - struct bnx2x_fastpath *parent_fp; - int tx_ring_size; }; enum bnx2x_tpa_mode_t { @@ -509,7 +507,7 @@ struct bnx2x_fastpath { enum bnx2x_tpa_mode_t mode; u8 max_cos; /* actual number of active tx coses */ - struct bnx2x_fp_txdata *txdata_ptr[BNX2X_MULTI_TX_COS]; + struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS]; struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */ struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */ @@ -549,45 +547,51 @@ struct bnx2x_fastpath { rx_calls; /* TPA related */ - struct bnx2x_agg_info *tpa_info; + struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2]; u8 disable_tpa; #ifdef BNX2X_STOP_ON_ERROR u64 tpa_queue_used; #endif + + struct tstorm_per_queue_stats old_tclient; + struct ustorm_per_queue_stats old_uclient; + struct xstorm_per_queue_stats old_xclient; + struct bnx2x_eth_q_stats eth_q_stats; + struct bnx2x_eth_q_stats_old eth_q_stats_old; + /* The size is calculated using the following: sizeof name field from netdev structure + 4 ('-Xx-' string) + 4 (for the digits and to make it DWORD aligned) */ #define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8) char name[FP_NAME_SIZE]; + + /* MACs object */ + struct bnx2x_vlan_mac_obj mac_obj; + + /* Queue State object */ + struct bnx2x_queue_sp_obj q_obj; + }; -#define bnx2x_fp(bp, nr, var) ((bp)->fp[(nr)].var) -#define bnx2x_sp_obj(bp, fp) ((bp)->sp_objs[(fp)->index]) -#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index])) -#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats)) +#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) /* Use 2500 as a mini-jumbo MTU for FCoE */ #define BNX2X_FCOE_MINI_JUMBO_MTU 2500 -#define FCOE_IDX_OFFSET 0 - -#define FCOE_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) + \ - FCOE_IDX_OFFSET) -#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX(bp)]) -#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) -#define bnx2x_fcoe_inner_sp_obj(bp) (&bp->sp_objs[FCOE_IDX(bp)]) -#define bnx2x_fcoe_sp_obj(bp, var) (bnx2x_fcoe_inner_sp_obj(bp)->var) -#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \ - txdata_ptr[FIRST_TX_COS_INDEX] \ - ->var) +/* FCoE L2 `fastpath' entry is 
right after the eth entries */ +#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp) +#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX]) +#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) +#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \ + txdata[FIRST_TX_COS_INDEX].var) #define IS_ETH_FP(fp) (fp->index < \ BNX2X_NUM_ETH_QUEUES(fp->bp)) #ifdef BCM_CNIC -#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX(fp->bp)) -#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp)) +#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX) +#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX) #else #define IS_FCOE_FP(fp) false #define IS_FCOE_IDX(idx) false @@ -974,8 +978,8 @@ union cdu_context { }; /* CDU host DB constants */ -#define CDU_ILT_PAGE_SZ_HW 2 -#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */ +#define CDU_ILT_PAGE_SZ_HW 3 +#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */ #define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context)) #ifdef BCM_CNIC @@ -1178,31 +1182,11 @@ struct bnx2x_prev_path_list { struct list_head list; }; -struct bnx2x_sp_objs { - /* MACs object */ - struct bnx2x_vlan_mac_obj mac_obj; - - /* Queue State object */ - struct bnx2x_queue_sp_obj q_obj; -}; - -struct bnx2x_fp_stats { - struct tstorm_per_queue_stats old_tclient; - struct ustorm_per_queue_stats old_uclient; - struct xstorm_per_queue_stats old_xclient; - struct bnx2x_eth_q_stats eth_q_stats; - struct bnx2x_eth_q_stats_old eth_q_stats_old; -}; - struct bnx2x { /* Fields used in the tx and intr/napi performance paths * are grouped together in the beginning of the structure */ struct bnx2x_fastpath *fp; - struct bnx2x_sp_objs *sp_objs; - struct bnx2x_fp_stats *fp_stats; - struct bnx2x_fp_txdata *bnx2x_txq; - int bnx2x_txq_size; void __iomem *regview; void __iomem *doorbells; u16 db_size; @@ -1318,7 +1302,6 @@ struct bnx2x { #define NO_FCOE_FLAG (1 << 15) #define BC_SUPPORTS_PFC_STATS (1 << 17) #define USING_SINGLE_MSIX_FLAG (1 << 20) -#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) #define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) #define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) @@ -1394,7 +1377,6 @@ struct bnx2x { #define BNX2X_MAX_COS 3 #define BNX2X_MAX_TX_COS 2 int num_queues; - int num_napi_queues; int disable_tpa; u32 rx_mode; @@ -1407,7 +1389,6 @@ struct bnx2x { u8 igu_dsb_id; u8 igu_base_sb; u8 igu_sb_cnt; - dma_addr_t def_status_blk_mapping; struct bnx2x_slowpath *slowpath; @@ -1439,11 +1420,7 @@ struct bnx2x { dma_addr_t fw_stats_data_mapping; int fw_stats_data_sz; - /* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB - * context size we need 8 ILT entries. 
- */ -#define ILT_MAX_L2_LINES 8 - struct hw_context context[ILT_MAX_L2_LINES]; + struct hw_context context; struct bnx2x_ilt *ilt; #define BP_ILT(bp) ((bp)->ilt) @@ -1456,14 +1433,13 @@ struct bnx2x { /* * Maximum CID count that might be required by the bnx2x: - * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI + * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related) */ -#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \ - + NON_ETH_CONTEXT_USE + CNIC_PRESENT) -#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \ - + NON_ETH_CONTEXT_USE + CNIC_PRESENT) +#define BNX2X_L2_CID_COUNT(bp) (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\ + NON_ETH_CONTEXT_USE + CNIC_PRESENT) #define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\ ILT_PAGE_CIDS)) +#define BNX2X_DB_SIZE(bp) (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT)) int qm_cid_count; @@ -1622,8 +1598,6 @@ struct bnx2x { extern int num_queues; #define BNX2X_NUM_QUEUES(bp) (bp->num_queues) #define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE) -#define BNX2X_NUM_NON_CNIC_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - \ - NON_ETH_CONTEXT_USE) #define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp) #define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) @@ -1682,9 +1656,6 @@ struct bnx2x_func_init_params { continue; \ else -#define for_each_napi_rx_queue(bp, var) \ - for ((var) = 0; (var) < bp->num_napi_queues; (var)++) - /* Skip OOO FP */ #define for_each_tx_queue(bp, var) \ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ @@ -1846,7 +1817,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define LOAD_NORMAL 0 #define LOAD_OPEN 1 #define LOAD_DIAG 2 -#define LOAD_LOOPBACK_EXT 3 #define UNLOAD_NORMAL 0 #define UNLOAD_CLOSE 1 #define UNLOAD_RECOVERY 2 @@ -1929,17 +1899,13 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define PCICFG_LINK_SPEED 0xf0000 #define PCICFG_LINK_SPEED_SHIFT 16 -#define BNX2X_NUM_TESTS_SF 7 -#define BNX2X_NUM_TESTS_MF 3 -#define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \ - BNX2X_NUM_TESTS_SF) + +#define BNX2X_NUM_TESTS 7 #define BNX2X_PHY_LOOPBACK 0 #define BNX2X_MAC_LOOPBACK 1 -#define BNX2X_EXT_LOOPBACK 2 #define BNX2X_PHY_LOOPBACK_FAILED 1 #define BNX2X_MAC_LOOPBACK_FAILED 2 -#define BNX2X_EXT_LOOPBACK_FAILED 3 #define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \ BNX2X_PHY_LOOPBACK_FAILED) diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 00951b3aa62b..8098eea9704d 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -40,19 +40,12 @@ * Makes sure the contents of the bp->fp[to].napi is kept * intact. This is done by first copying the napi struct from * the target to the source, and then mem copying the entire - * source onto the target. Update txdata pointers and related - * content. 
+ * source onto the target */ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) { struct bnx2x_fastpath *from_fp = &bp->fp[from]; struct bnx2x_fastpath *to_fp = &bp->fp[to]; - struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from]; - struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to]; - struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from]; - struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; - int old_max_eth_txqs, new_max_eth_txqs; - int old_txdata_index = 0, new_txdata_index = 0; /* Copy the NAPI object as it has been already initialized */ from_fp->napi = to_fp->napi; @@ -60,30 +53,6 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) /* Move bnx2x_fastpath contents */ memcpy(to_fp, from_fp, sizeof(*to_fp)); to_fp->index = to; - - /* move sp_objs contents as well, as their indices match fp ones */ - memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs)); - - /* move fp_stats contents as well, as their indices match fp ones */ - memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats)); - - /* Update txdata pointers in fp and move txdata content accordingly: - * Each fp consumes 'max_cos' txdata structures, so the index should be - * decremented by max_cos x delta. - */ - - old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos; - new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) * - (bp)->max_cos; - if (from == FCOE_IDX(bp)) { - old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET; - new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET; - } - - memcpy(&bp->bnx2x_txq[old_txdata_index], - &bp->bnx2x_txq[new_txdata_index], - sizeof(struct bnx2x_fp_txdata)); - to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index]; } int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ @@ -510,7 +479,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, where we are and drop the whole packet */ err = bnx2x_alloc_rx_sge(bp, fp, sge_idx); if (unlikely(err)) { - bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; + fp->eth_q_stats.rx_skb_alloc_failed++; return err; } @@ -615,7 +584,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, /* drop the packet and keep the buffer in the bin */ DP(NETIF_MSG_RX_STATUS, "Failed to allocate or map a new skb - dropping packet!\n"); - bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++; + fp->eth_q_stats.rx_skb_alloc_failed++; } static int bnx2x_alloc_rx_data(struct bnx2x *bp, @@ -648,10 +617,8 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp, return 0; } -static -void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe, - struct bnx2x_fastpath *fp, - struct bnx2x_eth_q_stats *qstats) +static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe, + struct bnx2x_fastpath *fp) { /* Do nothing if no IP/L4 csum validation was done */ @@ -665,7 +632,7 @@ void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe, if (cqe->fast_path_cqe.type_error_flags & (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) - qstats->hw_csum_err++; + fp->eth_q_stats.hw_csum_err++; else skb->ip_summed = CHECKSUM_UNNECESSARY; } @@ -809,7 +776,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, "ERROR flags %x rx packet %u\n", cqe_fp_flags, sw_comp_cons); - bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++; + fp->eth_q_stats.rx_err_discard_pkt++; goto reuse_rx; } @@ -822,7 +789,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) if (skb 
== NULL) { DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, "ERROR packet dropped because of alloc failure\n"); - bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; + fp->eth_q_stats.rx_skb_alloc_failed++; goto reuse_rx; } memcpy(skb->data, data + pad, len); @@ -836,15 +803,14 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) skb = build_skb(data, 0); if (unlikely(!skb)) { kfree(data); - bnx2x_fp_qstats(bp, fp)-> - rx_skb_alloc_failed++; + fp->eth_q_stats.rx_skb_alloc_failed++; goto next_rx; } skb_reserve(skb, pad); } else { DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, "ERROR packet dropped because of alloc failure\n"); - bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; + fp->eth_q_stats.rx_skb_alloc_failed++; reuse_rx: bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); goto next_rx; @@ -860,8 +826,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) skb_checksum_none_assert(skb); if (bp->dev->features & NETIF_F_RXCSUM) - bnx2x_csum_validate(skb, cqe, fp, - bnx2x_fp_qstats(bp, fp)); + bnx2x_csum_validate(skb, cqe, fp); + skb_record_rx_queue(skb, fp->rx_queue); @@ -922,7 +888,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) prefetch(fp->rx_cons_sb); for_each_cos_in_tx_queue(fp, cos) - prefetch(fp->txdata_ptr[cos]->tx_cons_sb); + prefetch(fp->txdata[cos].tx_cons_sb); prefetch(&fp->sb_running_index[SM_RX_ID]); napi_schedule(&bnx2x_fp(bp, fp->index, napi)); @@ -1239,7 +1205,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp) for_each_tx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_cos_in_tx_queue(fp, cos) { - struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; + struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; unsigned pkts_compl = 0, bytes_compl = 0; u16 sw_prod = txdata->tx_pkt_prod; @@ -1251,8 +1217,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp) sw_cons++; } netdev_tx_reset_queue( - netdev_get_tx_queue(bp->dev, - txdata->txq_index)); + netdev_get_tx_queue(bp->dev, txdata->txq_index)); } } } @@ -1360,7 +1325,7 @@ void bnx2x_free_irq(struct bnx2x *bp) free_irq(bp->dev->irq, bp->dev); } -int bnx2x_enable_msix(struct bnx2x *bp) +int __devinit bnx2x_enable_msix(struct bnx2x *bp) { int msix_vec = 0, i, rc, req_cnt; @@ -1614,8 +1579,6 @@ void bnx2x_set_num_queues(struct bnx2x *bp) #endif /* Add special queues */ bp->num_queues += NON_ETH_CONTEXT_USE; - - BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); } /** @@ -1644,8 +1607,8 @@ static int bnx2x_set_real_num_queues(struct bnx2x *bp) { int rc, tx, rx; - tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos; - rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE; + tx = MAX_TXQS_PER_COS * bp->max_cos; + rx = BNX2X_NUM_ETH_QUEUES(bp); /* account for fcoe queue */ #ifdef BCM_CNIC @@ -1703,13 +1666,14 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp) static int bnx2x_init_rss_pf(struct bnx2x *bp) { int i; + u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); /* Prepare the initial contents fo the indirection table if RSS is * enabled */ - for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++) - bp->rss_conf_obj.ind_table[i] = + for (i = 0; i < sizeof(ind_table); i++) + ind_table[i] = bp->fp->cl_id + ethtool_rxfh_indir_default(i, num_eth_queues); @@ -1721,11 +1685,12 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp) * For 57712 and newer on the other hand it's a per-function * configuration. 
*/ - return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp)); + return bnx2x_config_rss_eth(bp, ind_table, + bp->port.pmf || !CHIP_IS_E1x(bp)); } int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, - bool config_hash) + u8 *ind_table, bool config_hash) { struct bnx2x_config_rss_params params = {NULL}; int i; @@ -1748,15 +1713,11 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags); __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags); __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags); - if (rss_obj->udp_rss_v4) - __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags); - if (rss_obj->udp_rss_v6) - __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags); /* Hash bits */ params.rss_result_mask = MULTI_MASK; - memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); + memcpy(params.ind_table, ind_table, sizeof(params.ind_table)); if (config_hash) { /* RSS keys */ @@ -1793,7 +1754,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp) int rc; unsigned long ramrod_flags = 0, vlan_mac_flags = 0; struct bnx2x_mcast_ramrod_params rparam = {NULL}; - struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; + struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; /***************** Cleanup MACs' object first *************************/ @@ -1804,7 +1765,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp) /* Clean ETH primary MAC */ __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags); - rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags, + rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags, &ramrod_flags); if (rc != 0) BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc); @@ -1890,16 +1851,11 @@ bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err) static void bnx2x_bz_fp(struct bnx2x *bp, int index) { struct bnx2x_fastpath *fp = &bp->fp[index]; - struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index]; - - int cos; struct napi_struct orig_napi = fp->napi; - struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info; /* bzero bnx2x_fastpath contents */ - if (bp->stats_init) { - memset(fp->tpa_info, 0, sizeof(*fp->tpa_info)); + if (bp->stats_init) memset(fp, 0, sizeof(*fp)); - } else { + else { /* Keep Queue statistics */ struct bnx2x_eth_q_stats *tmp_eth_q_stats; struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old; @@ -1907,27 +1863,26 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index) tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats), GFP_KERNEL); if (tmp_eth_q_stats) - memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats, + memcpy(tmp_eth_q_stats, &fp->eth_q_stats, sizeof(struct bnx2x_eth_q_stats)); tmp_eth_q_stats_old = kzalloc(sizeof(struct bnx2x_eth_q_stats_old), GFP_KERNEL); if (tmp_eth_q_stats_old) - memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old, + memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old, sizeof(struct bnx2x_eth_q_stats_old)); - memset(fp->tpa_info, 0, sizeof(*fp->tpa_info)); memset(fp, 0, sizeof(*fp)); if (tmp_eth_q_stats) { - memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats, - sizeof(struct bnx2x_eth_q_stats)); + memcpy(&fp->eth_q_stats, tmp_eth_q_stats, + sizeof(struct bnx2x_eth_q_stats)); kfree(tmp_eth_q_stats); } if (tmp_eth_q_stats_old) { - memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old, + memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old, sizeof(struct bnx2x_eth_q_stats_old)); kfree(tmp_eth_q_stats_old); } @@ -1936,7 +1891,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index) /* Restore the NAPI object as it has been already initialized */ 
fp->napi = orig_napi; - fp->tpa_info = orig_tpa_info; + fp->bp = bp; fp->index = index; if (IS_ETH_FP(fp)) @@ -1945,16 +1900,6 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index) /* Special queues support only one CoS */ fp->max_cos = 1; - /* Init txdata pointers */ -#ifdef BCM_CNIC - if (IS_FCOE_FP(fp)) - fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)]; -#endif - if (IS_ETH_FP(fp)) - for_each_cos_in_tx_queue(fp, cos) - fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * - BNX2X_NUM_ETH_QUEUES(bp) + index]; - /* * set the tpa flag for each queue. The tpa flag determines the queue * minimal size so it must be set prior to queue memory allocation @@ -2004,13 +1949,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* * Zero fastpath structures preserving invariants like napi, which are * allocated only once, fp index, max_cos, bp pointer. - * Also set fp->disable_tpa and txdata_ptr. + * Also set fp->disable_tpa. */ DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); for_each_queue(bp, i) bnx2x_bz_fp(bp, i); - memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size * - sizeof(struct bnx2x_fp_txdata)); /* Set the receive queues buffer size */ @@ -2233,7 +2176,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) break; case LOAD_DIAG: - case LOAD_LOOPBACK_EXT: bp->state = BNX2X_STATE_DIAG; break; @@ -2253,7 +2195,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* re-read iscsi info */ bnx2x_get_iscsi_info(bp); bnx2x_setup_cnic_irq_info(bp); - bnx2x_setup_cnic_info(bp); if (bp->state == BNX2X_STATE_OPEN) bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); #endif @@ -2274,10 +2215,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) return -EBUSY; } - /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */ - if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG)) - bnx2x_dcbx_init(bp, false); - + bnx2x_dcbx_init(bp); return 0; #ifndef BNX2X_STOP_ON_ERROR @@ -2360,7 +2298,6 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) /* Stop Tx */ bnx2x_tx_disable(bp); - netdev_reset_tc(bp->dev); #ifdef BCM_CNIC bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); @@ -2519,8 +2456,8 @@ int bnx2x_poll(struct napi_struct *napi, int budget) #endif for_each_cos_in_tx_queue(fp, cos) - if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) - bnx2x_tx_int(bp, fp->txdata_ptr[cos]); + if (bnx2x_tx_queue_has_work(&fp->txdata[cos])) + bnx2x_tx_int(bp, &fp->txdata[cos]); if (bnx2x_has_rx_work(fp)) { @@ -2897,6 +2834,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); + struct bnx2x_fastpath *fp; struct netdev_queue *txq; struct bnx2x_fp_txdata *txdata; struct sw_tx_bd *tx_buf; @@ -2906,7 +2844,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; u32 pbd_e2_parsing_data = 0; u16 pkt_prod, bd_prod; - int nbd, txq_index; + int nbd, txq_index, fp_index, txdata_index; dma_addr_t mapping; u32 xmit_type = bnx2x_xmit_type(bp, skb); int i; @@ -2925,12 +2863,31 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT); - txdata = &bp->bnx2x_txq[txq_index]; + /* decode the fastpath index and the cos index from the txq */ + fp_index = TXQ_TO_FP(txq_index); + txdata_index = TXQ_TO_COS(txq_index); + +#ifdef BCM_CNIC + /* + * Override the above for the FCoE queue: + * - FCoE fp entry is right after the ETH entries. + * - FCoE L2 queue uses bp->txdata[0] only. 
+ */ + if (unlikely(!NO_FCOE(bp) && (txq_index == + bnx2x_fcoe_tx(bp, txq_index)))) { + fp_index = FCOE_IDX; + txdata_index = 0; + } +#endif /* enable this debug print to view the transmission queue being used DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n", txq_index, fp_index, txdata_index); */ + /* locate the fastpath and the txdata */ + fp = &bp->fp[fp_index]; + txdata = &fp->txdata[txdata_index]; + /* enable this debug print to view the tranmission details DP(NETIF_MSG_TX_QUEUED, "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n", @@ -2938,7 +2895,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(bnx2x_tx_avail(bp, txdata) < (skb_shinfo(skb)->nr_frags + 3))) { - bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; + fp->eth_q_stats.driver_xoff++; netif_tx_stop_queue(txq); BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); return NETDEV_TX_BUSY; @@ -3220,7 +3177,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) * fp->bd_tx_cons */ smp_mb(); - bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; + fp->eth_q_stats.driver_xoff++; if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4) netif_tx_wake_queue(txq); } @@ -3286,7 +3243,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) /* configure traffic class to transmission queue mapping */ for (cos = 0; cos < bp->max_cos; cos++) { count = BNX2X_NUM_ETH_QUEUES(bp); - offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp); + offset = cos * MAX_TXQS_PER_COS; netdev_set_tc_queue(dev, cos, count, offset); DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, "mapping tc %d to offset %d count %d\n", @@ -3385,7 +3342,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) if (!skip_tx_queue(bp, fp_index)) { /* fastpath tx rings: tx_buf tx_desc */ for_each_cos_in_tx_queue(fp, cos) { - struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; + struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; DP(NETIF_MSG_IFDOWN, "freeing tx memory of fp %d cos %d cid %d\n", @@ -3457,7 +3414,7 @@ static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, cqe_ring_prod); fp->rx_pkt = fp->rx_calls = 0; - bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt; + fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt; return i - failure_cnt; } @@ -3542,7 +3499,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) if (!skip_tx_queue(bp, index)) { /* fastpath tx rings: tx_buf tx_desc */ for_each_cos_in_tx_queue(fp, cos) { - struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; + struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; DP(NETIF_MSG_IFUP, "allocating tx memory of fp %d cos %d\n", @@ -3625,7 +3582,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp) #ifdef BCM_CNIC if (!NO_FCOE(bp)) /* FCoE */ - if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp))) + if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX)) /* we will fail load process instead of mark * NO_FCOE_FLAG */ @@ -3650,7 +3607,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp) */ /* move FCoE fp even NO_FCOE_FLAG is on */ - bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta); + bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta); #endif bp->num_queues -= delta; BNX2X_ERR("Adjusted num of queues from %d to %d\n", @@ -3662,11 +3619,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp) void bnx2x_free_mem_bp(struct bnx2x *bp) { - kfree(bp->fp->tpa_info); kfree(bp->fp); - kfree(bp->sp_objs); - kfree(bp->fp_stats); - kfree(bp->bnx2x_txq); kfree(bp->msix_table); kfree(bp->ilt); } @@ -3677,8 +3630,6 @@ int __devinit 
bnx2x_alloc_mem_bp(struct bnx2x *bp) struct msix_entry *tbl; struct bnx2x_ilt *ilt; int msix_table_size = 0; - int fp_array_size; - int i; /* * The biggest MSI-X table we might need is as a maximum number of fast @@ -3687,44 +3638,12 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp) msix_table_size = bp->igu_sb_cnt + 1; /* fp array: RSS plus CNIC related L2 queues */ - fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE; - BNX2X_DEV_INFO("fp_array_size %d", fp_array_size); - - fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL); + fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE, + sizeof(*fp), GFP_KERNEL); if (!fp) goto alloc_err; - for (i = 0; i < fp_array_size; i++) { - fp[i].tpa_info = - kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2, - sizeof(struct bnx2x_agg_info), GFP_KERNEL); - if (!(fp[i].tpa_info)) - goto alloc_err; - } - bp->fp = fp; - /* allocate sp objs */ - bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs), - GFP_KERNEL); - if (!bp->sp_objs) - goto alloc_err; - - /* allocate fp_stats */ - bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats), - GFP_KERNEL); - if (!bp->fp_stats) - goto alloc_err; - - /* Allocate memory for the transmission queues array */ - bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS; -#ifdef BCM_CNIC - bp->bnx2x_txq_size++; -#endif - bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size, - sizeof(struct bnx2x_fp_txdata), GFP_KERNEL); - if (!bp->bnx2x_txq) - goto alloc_err; - /* msix table */ tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL); if (!tbl) diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index daa894bd772a..7cd99b75347a 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -29,7 +29,6 @@ extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */ extern int num_queues; -extern int int_mode; /************************ Macros ********************************/ #define BNX2X_PCI_FREE(x, y, size) \ @@ -95,7 +94,7 @@ void bnx2x_send_unload_done(struct bnx2x *bp); * @config_hash: re-configure RSS hash keys configuration */ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, - bool config_hash); + u8 *ind_table, bool config_hash); /** * bnx2x__init_func_obj - init function object @@ -245,14 +244,6 @@ int bnx2x_cnic_notify(struct bnx2x *bp, int cmd); * @bp: driver handle */ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp); - -/** - * bnx2x_setup_cnic_info - provides cnic with updated info - * - * @bp: driver handle - */ -void bnx2x_setup_cnic_info(struct bnx2x *bp); - #endif /** @@ -418,7 +409,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp); * * @bp: driver handle */ -void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem); +void bnx2x_dcbx_init(struct bnx2x *bp); /** * bnx2x_set_power_state - set power state to the requested value. @@ -496,7 +487,7 @@ void bnx2x_netif_start(struct bnx2x *bp); * fills msix_table, requests vectors, updates num_queues * according to number of available vectors. 
*/ -int bnx2x_enable_msix(struct bnx2x *bp); +int __devinit bnx2x_enable_msix(struct bnx2x *bp); /** * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly @@ -737,7 +728,7 @@ static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp) { u8 cos; for_each_cos_in_tx_queue(fp, cos) - if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) + if (bnx2x_tx_queue_has_work(&fp->txdata[cos])) return true; return false; } @@ -789,10 +780,8 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp) { int i; - bp->num_napi_queues = bp->num_queues; - /* Add NAPI objects */ - for_each_napi_rx_queue(bp, i) + for_each_rx_queue(bp, i) netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll, BNX2X_NAPI_WEIGHT); } @@ -801,12 +790,10 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp) { int i; - for_each_napi_rx_queue(bp, i) + for_each_rx_queue(bp, i) netif_napi_del(&bnx2x_fp(bp, i, napi)); } -void bnx2x_set_int_mode(struct bnx2x *bp); - static inline void bnx2x_disable_msi(struct bnx2x *bp) { if (bp->flags & USING_MSIX_FLAG) { @@ -878,9 +865,11 @@ static inline int func_by_vn(struct bnx2x *bp, int vn) return 2 * vn + BP_PORT(bp); } -static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash) +static inline int bnx2x_config_rss_eth(struct bnx2x *bp, u8 *ind_table, + bool config_hash) { - return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash); + return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, ind_table, + config_hash); } /** @@ -986,8 +975,8 @@ static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp, struct bnx2x *bp = fp->bp; /* Configure classification DBs */ - bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id, - fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata), + bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid, + BP_FUNC(bp), bnx2x_sp(bp, mac_rdata), bnx2x_sp_mapping(bp, mac_rdata), BNX2X_FILTER_MAC_PENDING, &bp->sp_state, obj_type, @@ -1079,14 +1068,12 @@ static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp) } static inline void bnx2x_init_txdata(struct bnx2x *bp, - struct bnx2x_fp_txdata *txdata, u32 cid, - int txq_index, __le16 *tx_cons_sb, - struct bnx2x_fastpath *fp) + struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index, + __le16 *tx_cons_sb) { txdata->cid = cid; txdata->txq_index = txq_index; txdata->tx_cons_sb = tx_cons_sb; - txdata->parent_fp = fp; DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n", txdata->cid, txdata->txq_index); @@ -1120,13 +1107,18 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp); bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, BNX2X_FCOE_ETH_CL_ID_IDX); - bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp); + /** Current BNX2X_FCOE_ETH_CID deffinition implies not more than + * 16 ETH clients per function when CNIC is enabled! + * + * Fix it ASAP!!! 
+ */ + bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID; bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; - bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]), - fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX, - fp); + + bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]), + fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX); DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index); @@ -1143,8 +1135,8 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) /* No multi-CoS for FCoE L2 client */ BUG_ON(fp->max_cos != 1); - bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, - &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), + bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1, + BP_FUNC(bp), bnx2x_sp(bp, q_rdata), bnx2x_sp_mapping(bp, q_rdata), q_type); DP(NETIF_MSG_IFUP, diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 8a73374e52a7..4f9244bd7530 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -972,26 +972,23 @@ void bnx2x_dcbx_init_params(struct bnx2x *bp) bp->dcbx_config_params.admin_default_priority = 0; } -void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem) +void bnx2x_dcbx_init(struct bnx2x *bp) { u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE; - /* only PMF can send ADMIN msg to MFW in old MFW versions */ - if ((!bp->port.pmf) && (!(bp->flags & BC_SUPPORTS_DCBX_MSG_NON_PMF))) - return; - if (bp->dcbx_enabled <= 0) return; /* validate: * chip of good for dcbx version, * dcb is wanted + * the function is pmf * shmem2 contains DCBX support fields */ DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n", bp->dcb_state, bp->port.pmf); - if (bp->dcb_state == BNX2X_DCB_STATE_ON && + if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf && SHMEM2_HAS(bp, dcbx_lldp_params_offset)) { dcbx_lldp_params_offset = SHMEM2_RD(bp, dcbx_lldp_params_offset); @@ -1002,23 +999,12 @@ void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem) bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0); if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) { - /* need HW lock to avoid scenario of two drivers - * writing in parallel to shmem - */ - bnx2x_acquire_hw_lock(bp, - HW_LOCK_RESOURCE_DCBX_ADMIN_MIB); - if (update_shmem) - bnx2x_dcbx_admin_mib_updated_params(bp, - dcbx_lldp_params_offset); + bnx2x_dcbx_admin_mib_updated_params(bp, + dcbx_lldp_params_offset); /* Let HW start negotiation */ bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0); - /* release HW lock only after MFW acks that it finished - * reading values from shmem - */ - bnx2x_release_hw_lock(bp, - HW_LOCK_RESOURCE_DCBX_ADMIN_MIB); } } } @@ -2077,8 +2063,10 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev) "Handling parity error recovery. 
Try again later\n"); return 1; } - if (netif_running(bp->dev)) - bnx2x_dcbx_init(bp, true); + if (netif_running(bp->dev)) { + bnx2x_nic_unload(bp, UNLOAD_NORMAL); + rc = bnx2x_nic_load(bp, LOAD_NORMAL); + } DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc); if (rc) return 1; diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 1f8c1561cdec..bf30e2829285 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -187,8 +187,7 @@ static int bnx2x_get_port_type(struct bnx2x *bp) int port_type; u32 phy_idx = bnx2x_get_cur_phy_idx(bp); switch (bp->link_params.phy[phy_idx].media_type) { - case ETH_PHY_SFPP_10G_FIBER: - case ETH_PHY_SFP_1G_FIBER: + case ETH_PHY_SFP_FIBER: case ETH_PHY_XFP_FIBER: case ETH_PHY_KR: case ETH_PHY_CX4: @@ -221,11 +220,6 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) (bp->port.supported[cfg_idx ^ 1] & (SUPPORTED_TP | SUPPORTED_FIBRE)); cmd->advertising = bp->port.advertising[cfg_idx]; - if (bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type == - ETH_PHY_SFP_1G_FIBER) { - cmd->supported &= ~(SUPPORTED_10000baseT_Full); - cmd->advertising &= ~(ADVERTISED_10000baseT_Full); - } if ((bp->state == BNX2X_STATE_OPEN) && (bp->link_vars.link_up)) { if (!(bp->flags & MF_FUNC_DIS)) { @@ -301,7 +295,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct bnx2x *bp = netdev_priv(dev); u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config; - u32 speed, phy_idx; + u32 speed; if (IS_MF_SD(bp)) return 0; @@ -556,11 +550,9 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) "10G half not supported\n"); return -EINVAL; } - phy_idx = bnx2x_get_cur_phy_idx(bp); + if (!(bp->port.supported[cfg_idx] - & SUPPORTED_10000baseT_Full) || - (bp->link_params.phy[phy_idx].media_type == - ETH_PHY_SFP_1G_FIBER)) { + & SUPPORTED_10000baseT_Full)) { DP(BNX2X_MSG_ETHTOOL, "10G full not supported\n"); return -EINVAL; @@ -834,7 +826,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev, ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver); strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); info->n_stats = BNX2X_NUM_STATS; - info->testinfo_len = BNX2X_NUM_TESTS(bp); + info->testinfo_len = BNX2X_NUM_TESTS; info->eedump_len = bp->common.flash_size; info->regdump_len = bnx2x_get_regs_len(dev); } @@ -1160,65 +1152,6 @@ static int bnx2x_get_eeprom(struct net_device *dev, return rc; } -static int bnx2x_get_module_eeprom(struct net_device *dev, - struct ethtool_eeprom *ee, - u8 *data) -{ - struct bnx2x *bp = netdev_priv(dev); - int rc = 0, phy_idx; - u8 *user_data = data; - int remaining_len = ee->len, xfer_size; - unsigned int page_off = ee->offset; - - if (!netif_running(dev)) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, - "cannot access eeprom when the interface is down\n"); - return -EAGAIN; - } - - phy_idx = bnx2x_get_cur_phy_idx(bp); - bnx2x_acquire_phy_lock(bp); - while (!rc && remaining_len > 0) { - xfer_size = (remaining_len > SFP_EEPROM_PAGE_SIZE) ? 
- SFP_EEPROM_PAGE_SIZE : remaining_len; - rc = bnx2x_read_sfp_module_eeprom(&bp->link_params.phy[phy_idx], - &bp->link_params, - page_off, - xfer_size, - user_data); - remaining_len -= xfer_size; - user_data += xfer_size; - page_off += xfer_size; - } - - bnx2x_release_phy_lock(bp); - return rc; -} - -static int bnx2x_get_module_info(struct net_device *dev, - struct ethtool_modinfo *modinfo) -{ - struct bnx2x *bp = netdev_priv(dev); - int phy_idx; - if (!netif_running(dev)) { - DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, - "cannot access eeprom when the interface is down\n"); - return -EAGAIN; - } - - phy_idx = bnx2x_get_cur_phy_idx(bp); - switch (bp->link_params.phy[phy_idx].media_type) { - case ETH_PHY_SFPP_10G_FIBER: - case ETH_PHY_SFP_1G_FIBER: - case ETH_PHY_DA_TWINAX: - modinfo->type = ETH_MODULE_SFF_8079; - modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; - return 0; - default: - return -EOPNOTSUPP; - } -} - static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val, u32 cmd_flags) { @@ -1600,14 +1533,16 @@ static int bnx2x_set_pauseparam(struct net_device *dev, return 0; } -char *bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF] = { - "register_test (offline) ", - "memory_test (offline) ", - "int_loopback_test (offline)", - "ext_loopback_test (offline)", - "nvram_test (online) ", - "interrupt_test (online) ", - "link_test (online) " +static const struct { + char string[ETH_GSTRING_LEN]; +} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = { + { "register_test (offline)" }, + { "memory_test (offline)" }, + { "loopback_test (offline)" }, + { "nvram_test (online)" }, + { "interrupt_test (online)" }, + { "link_test (online)" }, + { "idle check (online)" } }; static u32 bnx2x_eee_to_adv(u32 eee_adv) @@ -2008,14 +1943,6 @@ static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes) if (cnt <= 0 && bnx2x_link_test(bp, is_serdes)) DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n"); - - cnt = 1400; - while (!bp->link_vars.link_up && cnt--) - msleep(20); - - if (cnt <= 0 && !bp->link_vars.link_up) - DP(BNX2X_MSG_ETHTOOL, - "Timeout waiting for link init\n"); } } @@ -2026,7 +1953,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) unsigned char *packet; struct bnx2x_fastpath *fp_rx = &bp->fp[0]; struct bnx2x_fastpath *fp_tx = &bp->fp[0]; - struct bnx2x_fp_txdata *txdata = fp_tx->txdata_ptr[0]; + struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0]; u16 tx_start_idx, tx_idx; u16 rx_start_idx, rx_idx; u16 pkt_prod, bd_prod; @@ -2041,16 +1968,13 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) u16 len; int rc = -ENODEV; u8 *data; - struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, - txdata->txq_index); + struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); /* check the loopback mode */ switch (loopback_mode) { case BNX2X_PHY_LOOPBACK: - if (bp->link_params.loopback_mode != LOOPBACK_XGXS) { - DP(BNX2X_MSG_ETHTOOL, "PHY loopback not supported\n"); + if (bp->link_params.loopback_mode != LOOPBACK_XGXS) return -EINVAL; - } break; case BNX2X_MAC_LOOPBACK: if (CHIP_IS_E3(bp)) { @@ -2067,13 +1991,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) bnx2x_phy_init(&bp->link_params, &bp->link_vars); break; - case BNX2X_EXT_LOOPBACK: - if (bp->link_params.loopback_mode != LOOPBACK_EXT) { - DP(BNX2X_MSG_ETHTOOL, - "Can't configure external loopback\n"); - return -EINVAL; - } - break; default: DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); return -EINVAL; @@ -2245,38 +2162,6 @@ static int 
bnx2x_test_loopback(struct bnx2x *bp) return rc; } -static int bnx2x_test_ext_loopback(struct bnx2x *bp) -{ - int rc; - u8 is_serdes = - (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0; - - if (BP_NOMCP(bp)) - return -ENODEV; - - if (!netif_running(bp->dev)) - return BNX2X_EXT_LOOPBACK_FAILED; - - bnx2x_nic_unload(bp, UNLOAD_NORMAL); - rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT); - if (rc) { - DP(BNX2X_MSG_ETHTOOL, - "Can't perform self-test, nic_load (for external lb) failed\n"); - return -ENODEV; - } - bnx2x_wait_for_link(bp, 1, is_serdes); - - bnx2x_netif_stop(bp, 1); - - rc = bnx2x_run_loopback(bp, BNX2X_EXT_LOOPBACK); - if (rc) - DP(BNX2X_MSG_ETHTOOL, "EXT loopback failed (res %d)\n", rc); - - bnx2x_netif_start(bp); - - return rc; -} - #define CRC32_RESIDUAL 0xdebb20e3 static int bnx2x_test_nvram(struct bnx2x *bp) @@ -2359,7 +2244,7 @@ static int bnx2x_test_intr(struct bnx2x *bp) return -ENODEV; } - params.q_obj = &bp->sp_objs->q_obj; + params.q_obj = &bp->fp->q_obj; params.cmd = BNX2X_Q_CMD_EMPTY; __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); @@ -2372,31 +2257,24 @@ static void bnx2x_self_test(struct net_device *dev, { struct bnx2x *bp = netdev_priv(dev); u8 is_serdes; - int rc; - if (bp->recovery_state != BNX2X_RECOVERY_DONE) { netdev_err(bp->dev, "Handling parity error recovery. Try again later\n"); etest->flags |= ETH_TEST_FL_FAILED; return; } - DP(BNX2X_MSG_ETHTOOL, - "Self-test command parameters: offline = %d, external_lb = %d\n", - (etest->flags & ETH_TEST_FL_OFFLINE), - (etest->flags & ETH_TEST_FL_EXTERNAL_LB)>>2); - memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp)); + memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS); - if (!netif_running(dev)) { - DP(BNX2X_MSG_ETHTOOL, - "Can't perform self-test when interface is down\n"); + if (!netif_running(dev)) return; - } + /* offline tests are not supported in MF mode */ + if (IS_MF(bp)) + etest->flags &= ~ETH_TEST_FL_OFFLINE; is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0; - /* offline tests are not supported in MF mode */ - if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) { + if (etest->flags & ETH_TEST_FL_OFFLINE) { int port = BP_PORT(bp); u32 val; u8 link_up; @@ -2409,14 +2287,7 @@ static void bnx2x_self_test(struct net_device *dev, link_up = bp->link_vars.link_up; bnx2x_nic_unload(bp, UNLOAD_NORMAL); - rc = bnx2x_nic_load(bp, LOAD_DIAG); - if (rc) { - etest->flags |= ETH_TEST_FL_FAILED; - DP(BNX2X_MSG_ETHTOOL, - "Can't perform self-test, nic_load (for offline) failed\n"); - return; - } - + bnx2x_nic_load(bp, LOAD_DIAG); /* wait until link state is restored */ bnx2x_wait_for_link(bp, 1, is_serdes); @@ -2429,51 +2300,30 @@ static void bnx2x_self_test(struct net_device *dev, etest->flags |= ETH_TEST_FL_FAILED; } - buf[2] = bnx2x_test_loopback(bp); /* internal LB */ + buf[2] = bnx2x_test_loopback(bp); if (buf[2] != 0) etest->flags |= ETH_TEST_FL_FAILED; - if (etest->flags & ETH_TEST_FL_EXTERNAL_LB) { - buf[3] = bnx2x_test_ext_loopback(bp); /* external LB */ - if (buf[3] != 0) - etest->flags |= ETH_TEST_FL_FAILED; - etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; - } - bnx2x_nic_unload(bp, UNLOAD_NORMAL); /* restore input for TX port IF */ REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val); - rc = bnx2x_nic_load(bp, LOAD_NORMAL); - if (rc) { - etest->flags |= ETH_TEST_FL_FAILED; - DP(BNX2X_MSG_ETHTOOL, - "Can't perform self-test, nic_load (for online) failed\n"); - return; - } + + bnx2x_nic_load(bp, LOAD_NORMAL); /* wait until link state is restored */ bnx2x_wait_for_link(bp, link_up, is_serdes); } 
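Aside on the self-test hunks above: the ethtool self-test callback fills one u64 result slot per test and raises the "failed" flag whenever any slot is nonzero, which is the convention the buf[] index shuffling between SF and MF mode in this diff is preserving. Below is a minimal stand-alone sketch of that convention only; the stubbed tests, the NUM_TESTS value and the locally defined TEST_FL_FAILED flag are assumptions for illustration, not the driver's code.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define TEST_FL_FAILED (1u << 1)   /* local stand-in for ETH_TEST_FL_FAILED */
    #define NUM_TESTS 3

    static uint64_t test_registers(void) { return 0; }  /* stub: 0 means pass */
    static uint64_t test_memory(void)    { return 0; }
    static uint64_t test_link(void)      { return 1; }  /* stub: nonzero means fail */

    static void self_test(uint32_t *flags, uint64_t *buf)
    {
        uint64_t (*tests[NUM_TESTS])(void) = {
            test_registers, test_memory, test_link
        };
        int i;

        memset(buf, 0, sizeof(uint64_t) * NUM_TESTS);
        for (i = 0; i < NUM_TESTS; i++) {
            buf[i] = tests[i]();            /* one result slot per test */
            if (buf[i])
                *flags |= TEST_FL_FAILED;   /* any failure marks the whole run */
        }
    }

    int main(void)
    {
        uint64_t buf[NUM_TESTS];
        uint32_t flags = 0;

        self_test(&flags, buf);
        printf("failed=%d results=%llu %llu %llu\n",
               !!(flags & TEST_FL_FAILED),
               (unsigned long long)buf[0],
               (unsigned long long)buf[1],
               (unsigned long long)buf[2]);
        return 0;
    }

The sketch mirrors only the reporting contract (per-test slot plus an aggregate failure flag); which slot a given test lands in, and how MF mode shifts those slots, is driver policy as shown in the hunks above.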
if (bnx2x_test_nvram(bp) != 0) { - if (!IS_MF(bp)) - buf[4] = 1; - else - buf[0] = 1; + buf[3] = 1; etest->flags |= ETH_TEST_FL_FAILED; } if (bnx2x_test_intr(bp) != 0) { - if (!IS_MF(bp)) - buf[5] = 1; - else - buf[1] = 1; + buf[4] = 1; etest->flags |= ETH_TEST_FL_FAILED; } if (bnx2x_link_test(bp, is_serdes) != 0) { - if (!IS_MF(bp)) - buf[6] = 1; - else - buf[2] = 1; + buf[5] = 1; etest->flags |= ETH_TEST_FL_FAILED; } @@ -2518,7 +2368,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset) return num_stats; case ETH_SS_TEST: - return BNX2X_NUM_TESTS(bp); + return BNX2X_NUM_TESTS; default: return -EINVAL; @@ -2528,7 +2378,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset) static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { struct bnx2x *bp = netdev_priv(dev); - int i, j, k, offset, start; + int i, j, k; char queue_name[MAX_QUEUE_NAME_LEN+1]; switch (stringset) { @@ -2559,17 +2409,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) break; case ETH_SS_TEST: - /* First 4 tests cannot be done in MF mode */ - if (!IS_MF(bp)) - start = 0; - else - start = 4; - for (i = 0, j = start; j < (start + BNX2X_NUM_TESTS(bp)); - i++, j++) { - offset = sprintf(buf+32*i, "%s", - bnx2x_tests_str_arr[j]); - *(buf+offset) = '\0'; - } + memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr)); break; } } @@ -2583,7 +2423,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev, if (is_multi(bp)) { for_each_eth_queue(bp, i) { - hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats; + hw_stats = (u32 *)&bp->fp[i].eth_q_stats; for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { if (bnx2x_q_stats_arr[j].size == 0) { /* skip this counter */ @@ -2667,41 +2507,6 @@ static int bnx2x_set_phys_id(struct net_device *dev, return 0; } -static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) -{ - - switch (info->flow_type) { - case TCP_V4_FLOW: - case TCP_V6_FLOW: - info->data = RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3; - break; - case UDP_V4_FLOW: - if (bp->rss_conf_obj.udp_rss_v4) - info->data = RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3; - else - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - case UDP_V6_FLOW: - if (bp->rss_conf_obj.udp_rss_v6) - info->data = RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3; - else - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - case IPV4_FLOW: - case IPV6_FLOW: - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - default: - info->data = 0; - break; - } - - return 0; -} - static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rules __always_unused) { @@ -2711,102 +2516,7 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, case ETHTOOL_GRXRINGS: info->data = BNX2X_NUM_ETH_QUEUES(bp); return 0; - case ETHTOOL_GRXFH: - return bnx2x_get_rss_flags(bp, info); - default: - DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); - return -EOPNOTSUPP; - } -} - -static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) -{ - int udp_rss_requested; - - DP(BNX2X_MSG_ETHTOOL, - "Set rss flags command parameters: flow type = %d, data = %llu\n", - info->flow_type, info->data); - - switch (info->flow_type) { - case TCP_V4_FLOW: - case TCP_V6_FLOW: - /* For TCP only 4-tupple hash is supported */ - if (info->data ^ (RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - DP(BNX2X_MSG_ETHTOOL, - "Command parameters not supported\n"); - return -EINVAL; - } else { - 
return 0; - } - - case UDP_V4_FLOW: - case UDP_V6_FLOW: - /* For UDP either 2-tupple hash or 4-tupple hash is supported */ - if (info->data == (RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3)) - udp_rss_requested = 1; - else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) - udp_rss_requested = 0; - else - return -EINVAL; - if ((info->flow_type == UDP_V4_FLOW) && - (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) { - bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested; - DP(BNX2X_MSG_ETHTOOL, - "rss re-configured, UDP 4-tupple %s\n", - udp_rss_requested ? "enabled" : "disabled"); - return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0); - } else if ((info->flow_type == UDP_V6_FLOW) && - (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) { - bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested; - return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0); - DP(BNX2X_MSG_ETHTOOL, - "rss re-configured, UDP 4-tupple %s\n", - udp_rss_requested ? "enabled" : "disabled"); - } else { - return 0; - } - case IPV4_FLOW: - case IPV6_FLOW: - /* For IP only 2-tupple hash is supported */ - if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) { - DP(BNX2X_MSG_ETHTOOL, - "Command parameters not supported\n"); - return -EINVAL; - } else { - return 0; - } - case SCTP_V4_FLOW: - case AH_ESP_V4_FLOW: - case AH_V4_FLOW: - case ESP_V4_FLOW: - case SCTP_V6_FLOW: - case AH_ESP_V6_FLOW: - case AH_V6_FLOW: - case ESP_V6_FLOW: - case IP_USER_FLOW: - case ETHER_FLOW: - /* RSS is not supported for these protocols */ - if (info->data) { - DP(BNX2X_MSG_ETHTOOL, - "Command parameters not supported\n"); - return -EINVAL; - } else { - return 0; - } - default: - return -EINVAL; - } -} - -static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) -{ - struct bnx2x *bp = netdev_priv(dev); - switch (info->cmd) { - case ETHTOOL_SRXFH: - return bnx2x_set_rss_flags(bp, info); default: DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); return -EOPNOTSUPP; @@ -2846,6 +2556,7 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir) { struct bnx2x *bp = netdev_priv(dev); size_t i; + u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { /* @@ -2857,88 +2568,10 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir) * align the received table to the Client ID of the leading RSS * queue */ - bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id; - } - - return bnx2x_config_rss_eth(bp, false); -} - -/** - * bnx2x_get_channels - gets the number of RSS queues. - * - * @dev: net device - * @channels: returns the number of max / current queues - */ -static void bnx2x_get_channels(struct net_device *dev, - struct ethtool_channels *channels) -{ - struct bnx2x *bp = netdev_priv(dev); - - channels->max_combined = BNX2X_MAX_RSS_COUNT(bp); - channels->combined_count = BNX2X_NUM_ETH_QUEUES(bp); -} - -/** - * bnx2x_change_num_queues - change the number of RSS queues. - * - * @bp: bnx2x private structure - * - * Re-configure interrupt mode to get the new number of MSI-X - * vectors and re-add NAPI objects. - */ -static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss) -{ - bnx2x_del_all_napi(bp); - bnx2x_disable_msi(bp); - BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE; - bnx2x_set_int_mode(bp); - bnx2x_add_all_napi(bp); -} - -/** - * bnx2x_set_channels - sets the number of RSS queues. 
- * - * @dev: net device - * @channels: includes the number of queues requested - */ -static int bnx2x_set_channels(struct net_device *dev, - struct ethtool_channels *channels) -{ - struct bnx2x *bp = netdev_priv(dev); - - - DP(BNX2X_MSG_ETHTOOL, - "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n", - channels->rx_count, channels->tx_count, channels->other_count, - channels->combined_count); - - /* We don't support separate rx / tx channels. - * We don't allow setting 'other' channels. - */ - if (channels->rx_count || channels->tx_count || channels->other_count - || (channels->combined_count == 0) || - (channels->combined_count > BNX2X_MAX_RSS_COUNT(bp))) { - DP(BNX2X_MSG_ETHTOOL, "command parameters not supported\n"); - return -EINVAL; + ind_table[i] = indir[i] + bp->fp->cl_id; } - /* Check if there was a change in the active parameters */ - if (channels->combined_count == BNX2X_NUM_ETH_QUEUES(bp)) { - DP(BNX2X_MSG_ETHTOOL, "No change in active parameters\n"); - return 0; - } - - /* Set the requested number of queues in bp context. - * Note that the actual number of queues created during load may be - * less than requested if memory is low. - */ - if (unlikely(!netif_running(dev))) { - bnx2x_change_num_queues(bp, channels->combined_count); - return 0; - } - bnx2x_nic_unload(bp, UNLOAD_NORMAL); - bnx2x_change_num_queues(bp, channels->combined_count); - return bnx2x_nic_load(bp, LOAD_NORMAL); + return bnx2x_config_rss_eth(bp, ind_table, false); } static const struct ethtool_ops bnx2x_ethtool_ops = { @@ -2968,14 +2601,9 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .set_phys_id = bnx2x_set_phys_id, .get_ethtool_stats = bnx2x_get_ethtool_stats, .get_rxnfc = bnx2x_get_rxnfc, - .set_rxnfc = bnx2x_set_rxnfc, .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size, .get_rxfh_indir = bnx2x_get_rxfh_indir, .set_rxfh_indir = bnx2x_set_rxfh_indir, - .get_channels = bnx2x_get_channels, - .set_channels = bnx2x_set_channels, - .get_module_info = bnx2x_get_module_info, - .get_module_eeprom = bnx2x_get_module_eeprom, .get_eee = bnx2x_get_eee, .set_eee = bnx2x_set_eee, }; diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index e7c390c66b2a..c61aa37298a3 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1150,7 +1150,6 @@ struct drv_port_mb { u32 link_status; /* Driver should update this field on any link change event */ - #define LINK_STATUS_NONE (0<<0) #define LINK_STATUS_LINK_FLAG_MASK 0x00000001 #define LINK_STATUS_LINK_UP 0x00000001 #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E @@ -1208,7 +1207,6 @@ struct drv_port_mb { #define LINK_STATUS_PFC_ENABLED 0x20000000 #define LINK_STATUS_PHYSICAL_LINK_FLAG 0x40000000 - #define LINK_STATUS_SFP_TX_FAULT 0x80000000 u32 port_stx; @@ -1255,7 +1253,6 @@ struct drv_func_mb { #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000 #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000 - #define REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF 0x00070401 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index 3e662bf7e5d8..91aa565d4374 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -284,6 +284,7 @@ #define ETS_E3B0_PBF_MIN_W_VAL (10000) #define MAX_PACKET_SIZE (9700) +#define WC_UC_TIMEOUT 100 
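Aside on the LINK_STATUS_* definitions in the bnx2x_hsi.h hunk above: link_status is a packed 32-bit word in which bit 0 is the link-up flag and bits 1..4 carry the speed/duplex code. A small stand-alone sketch of decoding such a word follows; the two mask values are copied from the hunk, while the decode helper itself is illustrative and not taken from the driver.

    #include <stdint.h>
    #include <stdio.h>

    #define LINK_STATUS_LINK_UP               0x00000001
    #define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001E

    static void decode_link_status(uint32_t status)
    {
        if (status & LINK_STATUS_LINK_UP)
            /* speed/duplex code occupies bits 1..4 of the status word */
            printf("link up, speed/duplex code %u\n",
                   (unsigned)((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) >> 1));
        else
            printf("link down\n");
    }

    int main(void)
    {
        decode_link_status(0x00000003);  /* link up, code 1 */
        decode_link_status(0x00000000);  /* link down */
        return 0;
    }

The same mask-and-shift pattern applies to the other LINK_STATUS_* fields defined in that structure; only the two masks shown here are used in the sketch.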
#define MAX_KR_LINK_RETRY 4 /**********************************************************/ @@ -1626,7 +1627,7 @@ static void bnx2x_umac_enable(struct link_params *params, /* Reset UMAC */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); - usleep_range(1000, 2000); + usleep_range(1000, 1000); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, (MISC_REGISTERS_RESET_REG_2_UMAC0 << params->port)); @@ -1728,7 +1729,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed) /* Hard reset */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, MISC_REGISTERS_RESET_REG_2_XMAC); - usleep_range(1000, 2000); + usleep_range(1000, 1000); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, MISC_REGISTERS_RESET_REG_2_XMAC); @@ -1758,7 +1759,7 @@ static void bnx2x_xmac_init(struct link_params *params, u32 max_speed) /* Soft reset */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); - usleep_range(1000, 2000); + usleep_range(1000, 1000); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, MISC_REGISTERS_RESET_REG_2_XMAC_SOFT); @@ -1879,6 +1880,11 @@ static int bnx2x_emac_enable(struct link_params *params, bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_RESET); + if (CHIP_REV_IS_SLOW(bp)) { + /* config GMII mode */ + val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE); + EMAC_WR(bp, EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII)); + } else { /* ASIC */ /* pause enable/disable */ bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE, EMAC_RX_MODE_FLOW_EN); @@ -1901,6 +1907,7 @@ static int bnx2x_emac_enable(struct link_params *params, } else bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_FLOW_EN); + } /* KEEP_VLAN_TAG, promiscuous */ val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); @@ -1939,23 +1946,23 @@ static int bnx2x_emac_enable(struct link_params *params, val &= ~0x810; EMAC_WR(bp, EMAC_REG_EMAC_MODE, val); - /* Enable emac */ + /* enable emac */ REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 1); - /* Enable emac for jumbo packets */ + /* enable emac for jumbo packets */ EMAC_WR(bp, EMAC_REG_EMAC_RX_MTU_SIZE, (EMAC_RX_MTU_SIZE_JUMBO_ENA | (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); - /* Strip CRC */ + /* strip CRC */ REG_WR(bp, NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1); - /* Disable the NIG in/out to the bmac */ + /* disable the NIG in/out to the bmac */ REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0x0); REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0); REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0x0); - /* Enable the NIG in/out to the emac */ + /* enable the NIG in/out to the emac */ REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1); val = 0; if ((params->feature_config_flags & @@ -1990,7 +1997,7 @@ static void bnx2x_update_pfc_bmac1(struct link_params *params, wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2); - /* TX control */ + /* tx control */ val = 0xc0; if (!(params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) && @@ -2050,7 +2057,7 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params, wb_data[0] &= ~(1<<2); } else { DP(NETIF_MSG_LINK, "PFC is disabled\n"); - /* Disable PFC RX & TX & STATS and set 8 COS */ + /* disable PFC RX & TX & STATS and set 8 COS */ wb_data[0] = 0x8; wb_data[1] = 0; } @@ -2144,7 +2151,7 @@ static int bnx2x_pfc_brb_get_config_params( PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE; config_val->pauseable_th.full_xon = 
PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE; - /* Non pause able*/ + /* non pause able*/ config_val->non_pauseable_th.pause_xoff = PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; config_val->non_pauseable_th.pause_xon = @@ -2172,7 +2179,7 @@ static int bnx2x_pfc_brb_get_config_params( PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE; config_val->pauseable_th.full_xon = PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE; - /* Non pause able*/ + /* non pause able*/ config_val->non_pauseable_th.pause_xoff = PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; config_val->non_pauseable_th.pause_xon = @@ -2202,7 +2209,7 @@ static int bnx2x_pfc_brb_get_config_params( PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE; config_val->pauseable_th.full_xon = PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE; - /* Non pause able*/ + /* non pause able*/ config_val->non_pauseable_th.pause_xoff = PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; config_val->non_pauseable_th.pause_xon = @@ -2220,7 +2227,7 @@ static int bnx2x_pfc_brb_get_config_params( PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE; config_val->pauseable_th.full_xon = PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE; - /* Non pause able*/ + /* non pause able*/ config_val->non_pauseable_th.pause_xoff = PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE; config_val->non_pauseable_th.pause_xon = @@ -2277,7 +2284,7 @@ static void bnx2x_pfc_brb_get_e3b0_config_params( if (pfc_params->cos0_pauseable != pfc_params->cos1_pauseable) { - /* Nonpauseable= Lossy + pauseable = Lossless*/ + /* nonpauseable= Lossy + pauseable = Lossless*/ e3b0_val->lb_guarantied = PFC_E3B0_2P_MIX_PAUSE_LB_GUART; e3b0_val->mac_0_class_t_guarantied = @@ -2476,9 +2483,9 @@ static int bnx2x_update_pfc_brb(struct link_params *params, * This function is needed because NIG ARB_CREDIT_WEIGHT_X are * not continues and ARB_CREDIT_WEIGHT_0 + offset is suitable. ******************************************************************************/ -static int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp, - u8 cos_entry, - u32 priority_mask, u8 port) +int bnx2x_pfc_nig_rx_priority_mask(struct bnx2x *bp, + u8 cos_entry, + u32 priority_mask, u8 port) { u32 nig_reg_rx_priority_mask_add = 0; @@ -2605,7 +2612,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params, REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 : NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7); - /* Output enable for RX_XCM # IF */ + /* output enable for RX_XCM # IF */ REG_WR(bp, port ? 
NIG_REG_XCM1_OUT_EN : NIG_REG_XCM0_OUT_EN, xcm_out_en); @@ -2654,10 +2661,10 @@ int bnx2x_update_pfc(struct link_params *params, bnx2x_update_mng(params, vars->link_status); - /* Update NIG params */ + /* update NIG params */ bnx2x_update_pfc_nig(params, vars, pfc_params); - /* Update BRB params */ + /* update BRB params */ bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params); if (bnx2x_status) return bnx2x_status; @@ -2712,7 +2719,7 @@ static int bnx2x_bmac1_enable(struct link_params *params, REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL, wb_data, 2); - /* TX MAC SA */ + /* tx MAC SA */ wb_data[0] = ((params->mac_addr[2] << 24) | (params->mac_addr[3] << 16) | (params->mac_addr[4] << 8) | @@ -2721,7 +2728,7 @@ static int bnx2x_bmac1_enable(struct link_params *params, params->mac_addr[1]); REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, wb_data, 2); - /* MAC control */ + /* mac control */ val = 0x3; if (is_lb) { val |= 0x4; @@ -2731,24 +2738,24 @@ static int bnx2x_bmac1_enable(struct link_params *params, wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2); - /* Set rx mtu */ + /* set rx mtu */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_data, 2); bnx2x_update_pfc_bmac1(params, vars); - /* Set tx mtu */ + /* set tx mtu */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_data, 2); - /* Set cnt max size */ + /* set cnt max size */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE, wb_data, 2); - /* Configure SAFC */ + /* configure safc */ wb_data[0] = 0x1000200; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS, @@ -2782,7 +2789,7 @@ static int bnx2x_bmac2_enable(struct link_params *params, udelay(30); - /* TX MAC SA */ + /* tx MAC SA */ wb_data[0] = ((params->mac_addr[2] << 24) | (params->mac_addr[3] << 16) | (params->mac_addr[4] << 8) | @@ -2801,18 +2808,18 @@ static int bnx2x_bmac2_enable(struct link_params *params, wb_data, 2); udelay(30); - /* Set RX MTU */ + /* set rx mtu */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_MAX_SIZE, wb_data, 2); udelay(30); - /* Set TX MTU */ + /* set tx mtu */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_MAX_SIZE, wb_data, 2); udelay(30); - /* Set cnt max size */ + /* set cnt max size */ wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD - 2; wb_data[1] = 0; REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, wb_data, 2); @@ -2830,15 +2837,15 @@ static int bnx2x_bmac_enable(struct link_params *params, u8 port = params->port; struct bnx2x *bp = params->bp; u32 val; - /* Reset and unreset the BigMac */ + /* reset and unreset the BigMac */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); - usleep_range(1000, 2000); + msleep(1); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); - /* Enable access for bmac registers */ + /* enable access for bmac registers */ REG_WR(bp, NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1); /* Enable BMAC according to BMAC type*/ @@ -2896,7 +2903,7 @@ static void bnx2x_bmac_rx_disable(struct bnx2x *bp, u8 port) 
BIGMAC_REGISTER_BMAC_CONTROL, wb_data, 2); } - usleep_range(1000, 2000); + msleep(1); } } @@ -2908,16 +2915,17 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, u32 init_crd, crd; u32 count = 1000; - /* Disable port */ + /* disable port */ REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1); - /* Wait for init credit */ + /* wait for init credit */ init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4); crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd); while ((init_crd != crd) && count) { - usleep_range(5000, 10000); + msleep(5); + crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8); count--; } @@ -2934,18 +2942,18 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, line_speed == SPEED_1000 || line_speed == SPEED_2500) { REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 1); - /* Update threshold */ + /* update threshold */ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0); - /* Update init credit */ + /* update init credit */ init_crd = 778; /* (800-18-4) */ } else { u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16; REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0); - /* Update threshold */ + /* update threshold */ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh); - /* Update init credit */ + /* update init credit */ switch (line_speed) { case SPEED_10000: init_crd = thresh + 553 - 22; @@ -2960,12 +2968,12 @@ static int bnx2x_pbf_update(struct link_params *params, u32 flow_ctrl, DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n", line_speed, init_crd); - /* Probe the credit changes */ + /* probe the credit changes */ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1); - usleep_range(5000, 10000); + msleep(5); REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0); - /* Enable port */ + /* enable port */ REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0); return 0; } @@ -3032,7 +3040,7 @@ static int bnx2x_cl22_write(struct bnx2x *bp, REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode & ~EMAC_MDIO_MODE_CLAUSE_45); - /* Address */ + /* address */ tmp = ((phy->addr << 21) | (reg << 16) | val | EMAC_MDIO_COMM_COMMAND_WRITE_22 | EMAC_MDIO_COMM_START_BUSY); @@ -3068,7 +3076,7 @@ static int bnx2x_cl22_read(struct bnx2x *bp, REG_WR(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, mode & ~EMAC_MDIO_MODE_CLAUSE_45); - /* Address */ + /* address */ val = ((phy->addr << 21) | (reg << 16) | EMAC_MDIO_COMM_COMMAND_READ_22 | EMAC_MDIO_COMM_START_BUSY); @@ -3106,7 +3114,7 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, if (phy->flags & FLAGS_MDC_MDIO_WA_B0) bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, EMAC_MDIO_STATUS_10MB); - /* Address */ + /* address */ val = ((phy->addr << 21) | (devad << 16) | reg | EMAC_MDIO_COMM_COMMAND_ADDRESS | EMAC_MDIO_COMM_START_BUSY); @@ -3127,7 +3135,7 @@ static int bnx2x_cl45_read(struct bnx2x *bp, struct bnx2x_phy *phy, *ret_val = 0; rc = -EFAULT; } else { - /* Data */ + /* data */ val = ((phy->addr << 21) | (devad << 16) | EMAC_MDIO_COMM_COMMAND_READ_45 | EMAC_MDIO_COMM_START_BUSY); @@ -3175,7 +3183,7 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, bnx2x_bits_en(bp, phy->mdio_ctrl + EMAC_REG_EMAC_MDIO_STATUS, EMAC_MDIO_STATUS_10MB); - /* Address */ + /* address */ tmp = ((phy->addr << 21) | (devad << 16) | reg | EMAC_MDIO_COMM_COMMAND_ADDRESS | EMAC_MDIO_COMM_START_BUSY); @@ -3195,7 +3203,7 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy, netdev_err(bp->dev, "MDC/MDIO access timeout\n"); rc = 
-EFAULT; } else { - /* Data */ + /* data */ tmp = ((phy->addr << 21) | (devad << 16) | val | EMAC_MDIO_COMM_COMMAND_WRITE_45 | EMAC_MDIO_COMM_START_BUSY); @@ -3285,23 +3293,23 @@ static int bnx2x_bsc_read(struct link_params *params, xfer_cnt = 16 - lc_addr; - /* Enable the engine */ + /* enable the engine */ val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); val |= MCPR_IMC_COMMAND_ENABLE; REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); - /* Program slave device ID */ + /* program slave device ID */ val = (sl_devid << 16) | sl_addr; REG_WR(bp, MCP_REG_MCPR_IMC_SLAVE_CONTROL, val); - /* Start xfer with 0 byte to update the address pointer ???*/ + /* start xfer with 0 byte to update the address pointer ???*/ val = (MCPR_IMC_COMMAND_ENABLE) | (MCPR_IMC_COMMAND_WRITE_OP << MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | (lc_addr << MCPR_IMC_COMMAND_TRANSFER_ADDRESS_BITSHIFT) | (0); REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); - /* Poll for completion */ + /* poll for completion */ i = 0; val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { @@ -3317,7 +3325,7 @@ static int bnx2x_bsc_read(struct link_params *params, if (rc == -EFAULT) return rc; - /* Start xfer with read op */ + /* start xfer with read op */ val = (MCPR_IMC_COMMAND_ENABLE) | (MCPR_IMC_COMMAND_READ_OP << MCPR_IMC_COMMAND_OPERATION_BITSHIFT) | @@ -3325,7 +3333,7 @@ static int bnx2x_bsc_read(struct link_params *params, (xfer_cnt); REG_WR(bp, MCP_REG_MCPR_IMC_COMMAND, val); - /* Poll for completion */ + /* poll for completion */ i = 0; val = REG_RD(bp, MCP_REG_MCPR_IMC_COMMAND); while (((val >> MCPR_IMC_COMMAND_IMC_STATUS_BITSHIFT) & 0x3) != 1) { @@ -3428,7 +3436,7 @@ static u8 bnx2x_get_warpcore_lane(struct bnx2x_phy *phy, port = port ^ 1; lane = (port<<1) + path; - } else { /* Two port mode - no port swap */ + } else { /* two port mode - no port swap */ /* Figure out path swap value */ path_swap_ovr = @@ -3506,7 +3514,7 @@ static void bnx2x_serdes_deassert(struct bnx2x *bp, u8 port) val = SERDES_RESET_BITS << (port*16); - /* Reset and unreset the SerDes/XGXS */ + /* reset and unreset the SerDes/XGXS */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); udelay(500); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); @@ -3527,7 +3535,7 @@ static void bnx2x_xgxs_deassert(struct link_params *params) val = XGXS_RESET_BITS << (port*16); - /* Reset and unreset the SerDes/XGXS */ + /* reset and unreset the SerDes/XGXS */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val); udelay(500); REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val); @@ -3619,7 +3627,7 @@ static void bnx2x_ext_phy_set_pause(struct link_params *params, { u16 val; struct bnx2x *bp = params->bp; - /* Read modify write pause advertizing */ + /* read modify write pause advertizing */ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, &val); val &= ~MDIO_AN_REG_ADV_PAUSE_BOTH; @@ -3754,35 +3762,44 @@ static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x_phy *phy, static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) { - u16 val16 = 0, lane, i; - struct bnx2x *bp = params->bp; - static struct bnx2x_reg_set reg_set[] = { - {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, - {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0}, - {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0}, - {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff}, - {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555}, - 
{MDIO_PMA_DEVAD, MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0}, - {MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0x7415}, - {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190}, - /* Disable Autoneg: re-enable it after adv is done. */ - {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0} - }; + u16 val16 = 0, lane, bam37 = 0; + struct bnx2x *bp = params->bp; DP(NETIF_MSG_LINK, "Enable Auto Negotiation for KR\n"); /* Set to default registers that may be overriden by 10G force */ - for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++) - bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, - reg_set[i].val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7); + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_PAR_DET_10G_CTRL, 0); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0xff); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0x5555); + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, + MDIO_WC_REG_IEEE0BLK_AUTONEGNP, 0x0); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, 0x7415); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC2, 0x6190); + /* Disable Autoneg: re-enable it after adv is done. */ + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0); /* Check adding advertisement for 1G KX */ if (((vars->line_speed == SPEED_AUTO_NEG) && (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) || (vars->line_speed == SPEED_1000)) { - u32 addr = MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2; + u16 sd_digital; val16 |= (1<<5); /* Enable CL37 1G Parallel Detect */ - bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, addr, 0x1); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, &sd_digital); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, + (sd_digital | 0x1)); + DP(NETIF_MSG_LINK, "Advertize 1G\n"); } if (((vars->line_speed == SPEED_AUTO_NEG) && @@ -3792,7 +3809,7 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, val16 |= (1<<7); /* Enable 10G Parallel Detect */ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, - MDIO_WC_REG_PAR_DET_10G_CTRL, 1); + MDIO_WC_REG_PAR_DET_10G_CTRL, 1); DP(NETIF_MSG_LINK, "Advertize 10G\n"); } @@ -3826,9 +3843,10 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, offsetof(struct shmem_region, dev_info. 
port_hw_config[params->port].default_cfg)) & PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) { - bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, - 1); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, &bam37); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL6_MP5_NEXTPAGECTRL, bam37 | 1); DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n"); } @@ -3842,8 +3860,11 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Enable AN KR work-around\n"); vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY; } - bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL5_MISC7, 0x100); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC7, &val16); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC7, val16 | 0x100); /* Over 1G - AN local device user page 1 */ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, @@ -3860,35 +3881,50 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy, struct link_vars *vars) { struct bnx2x *bp = params->bp; - u16 i; - static struct bnx2x_reg_set reg_set[] = { - /* Disable Autoneg */ - {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7}, - {MDIO_AN_DEVAD, MDIO_WC_REG_PAR_DET_10G_CTRL, 0}, - {MDIO_WC_DEVAD, MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, - 0x3f00}, - {MDIO_AN_DEVAD, MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0}, - {MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0}, - {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL3_UP1, 0x1}, - {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL5_MISC7, 0xa}, - /* Disable CL36 PCS Tx */ - {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0}, - /* Double Wide Single Data Rate @ pll rate */ - {MDIO_WC_DEVAD, MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF}, - /* Leave cl72 training enable, needed for KR */ - {MDIO_PMA_DEVAD, - MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150, - 0x2} - }; + u16 val; + + /* Disable Autoneg */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7); + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_PAR_DET_10G_CTRL, 0); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, 0x3f00); - for (i = 0; i < sizeof(reg_set)/sizeof(struct bnx2x_reg_set); i++) - bnx2x_cl45_write(bp, phy, reg_set[i].devad, reg_set[i].reg, - reg_set[i].val); + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_AN_IEEE1BLK_AN_ADVERTISEMENT1, 0); + + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x0); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL3_UP1, 0x1); + + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL5_MISC7, 0xa); + + /* Disable CL36 PCS Tx */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL0, 0x0); + + /* Double Wide Single Data Rate @ pll rate */ + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK1_LANECTRL1, 0xFFFF); + + /* Leave cl72 training enable, needed for KR */ + bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, + MDIO_WC_REG_PMD_IEEE9BLK_TENGBASE_KR_PMD_CONTROL_REGISTER_150, + 0x2); /* Leave CL72 enabled */ - bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, - 0x3800); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, + &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_CL72_USERB0_CL72_MISC1_CONTROL, + val | 0x3800); /* Set speed via PMA/PMD register */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, @@ -3909,7 
+3945,7 @@ static void bnx2x_warpcore_set_10G_KR(struct bnx2x_phy *phy, bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_RX66_CONTROL, 0xF9); - /* Set and clear loopback to cause a reset to 64/66 decoder */ + /* set and clear loopback to cause a reset to 64/66 decoder */ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x4000); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, @@ -3924,12 +3960,16 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u16 misc1_val, tap_val, tx_driver_val, lane, val; /* Hold rxSeqStart */ - bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, 0x8000); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DSC2B0_DSC_MISC_CTRL0, (val | 0x8000)); /* Hold tx_fifo_reset */ - bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x1); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, (val | 0x1)); /* Disable CL73 AN */ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); @@ -3941,8 +3981,10 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy, MDIO_WC_REG_FX100_CTRL1, (val & 0xFFFA)); /* Disable 100FX Idle detect */ - bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_FX100_CTRL3, 0x0080); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL3, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL3, (val | 0x0080)); /* Set Block address to Remote PHY & Clear forced_speed[5] */ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, @@ -4003,12 +4045,16 @@ static void bnx2x_warpcore_set_10G_XFI(struct bnx2x_phy *phy, tx_driver_val); /* Enable fiber mode, enable and invert sig_det */ - bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0xd); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, val | 0xd); /* Set Block address to Remote PHY & Set forced_speed[5], 40bit mode */ - bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL4_MISC3, 0x8080); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, &val); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, val | 0x8080); /* Enable LPI pass through */ DP(NETIF_MSG_LINK, "Configure WC for LPI pass through\n"); @@ -4206,35 +4252,40 @@ static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy, u16 lane) { struct bnx2x *bp = params->bp; - u16 i; - static struct bnx2x_reg_set wc_regs[] = { - {MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0}, - {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL1, 0x014a}, - {MDIO_WC_DEVAD, MDIO_WC_REG_FX100_CTRL3, 0x0800}, - {MDIO_WC_DEVAD, MDIO_WC_REG_DIGITAL4_MISC3, 0x8008}, - {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, - 0x0195}, - {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, - 0x0007}, - {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, - 0x0002}, - {MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000}, - {MDIO_WC_DEVAD, MDIO_WC_REG_TX_FIR_TAP, 0x0000}, - {MDIO_WC_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040}, - {MDIO_WC_DEVAD, MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140} - }; - /* Set XFI clock comp as default. 
*/ - bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_RX66_CONTROL, (3<<13)); + u16 val16; - for (i = 0; i < sizeof(wc_regs)/sizeof(struct bnx2x_reg_set); i++) - bnx2x_cl45_write(bp, phy, wc_regs[i].devad, wc_regs[i].reg, - wc_regs[i].val); + /* Set XFI clock comp as default. */ + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_RX66_CONTROL, val16 | (3<<13)); + bnx2x_warpcore_reset_lane(bp, phy, 1); + bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_CTRL, 0); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL1, 0x014a); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_FX100_CTRL3, 0x0800); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_DIGITAL4_MISC3, 0x8008); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x0195); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x0007); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X3, 0x0002); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_SERDESDIGITAL_MISC1, 0x6000); lane = bnx2x_get_warpcore_lane(phy, params); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_TX_FIR_TAP, 0x0000); bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, MDIO_WC_REG_TX0_TX_DRIVER + 0x10*lane, 0x0990); - + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x2040); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, 0x0140); + bnx2x_warpcore_reset_lane(bp, phy, 0); } static int bnx2x_get_mod_abs_int_cfg(struct bnx2x *bp, @@ -4322,7 +4373,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy, if (!vars->turn_to_run_wc_rt) return; - /* Return if there is no link partner */ + /* return if there is no link partner */ if (!(bnx2x_warpcore_get_sigdet(phy, params))) { DP(NETIF_MSG_LINK, "bnx2x_warpcore_get_sigdet false\n"); return; @@ -4356,7 +4407,7 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy, bnx2x_warpcore_reset_lane(bp, phy, 1); bnx2x_warpcore_reset_lane(bp, phy, 0); - /* Restart Autoneg */ + /* restart Autoneg */ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1200); @@ -4373,23 +4424,6 @@ static void bnx2x_warpcore_config_runtime(struct bnx2x_phy *phy, } /*params->rx_tx_asic_rst*/ } -static void bnx2x_warpcore_config_sfi(struct bnx2x_phy *phy, - struct link_params *params) -{ - u16 lane = bnx2x_get_warpcore_lane(phy, params); - struct bnx2x *bp = params->bp; - bnx2x_warpcore_clear_regs(phy, params, lane); - if ((params->req_line_speed[LINK_CONFIG_IDX(INT_PHY)] == - SPEED_10000) && - (phy->media_type != ETH_PHY_SFP_1G_FIBER)) { - DP(NETIF_MSG_LINK, "Setting 10G SFI\n"); - bnx2x_warpcore_set_10G_XFI(phy, params, 0); - } else { - DP(NETIF_MSG_LINK, "Setting 1G Fiber\n"); - bnx2x_warpcore_set_sgmii_speed(phy, params, 1, 0); - } -} - static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) @@ -4450,11 +4484,19 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy, break; case PORT_HW_CFG_NET_SERDES_IF_SFI: + + bnx2x_warpcore_clear_regs(phy, params, lane); + if (vars->line_speed == SPEED_10000) { + DP(NETIF_MSG_LINK, "Setting 10G SFI\n"); + bnx2x_warpcore_set_10G_XFI(phy, params, 0); + } else if (vars->line_speed == SPEED_1000) { + DP(NETIF_MSG_LINK, "Setting 1G Fiber\n"); + bnx2x_warpcore_set_sgmii_speed( + phy, params, 1, 0); + } /* Issue Module 
detection */ if (bnx2x_is_sfp_module_plugged(phy, params)) bnx2x_sfp_module_detection(phy, params); - - bnx2x_warpcore_config_sfi(phy, params); break; case PORT_HW_CFG_NET_SERDES_IF_DXGXS: @@ -4571,9 +4613,12 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy, CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK, MDIO_AER_BLOCK_AER_REG, 0); /* Enable 1G MDIO (1-copy) */ - bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, - 0x10); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, + &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_XGXSBLK0_XGXSCONTROL, + val16 | 0x10); /* Set 1G loopback based on lane (1-copy) */ lane = bnx2x_get_warpcore_lane(phy, params); bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, @@ -4586,19 +4631,22 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy, bnx2x_set_aer_mmd(params, phy); } else { /* 10G & 20G */ - bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_COMBO_IEEE0_MIICTRL, - 0x4000); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 | + 0x4000); - bnx2x_cl45_read_or_write(bp, phy, MDIO_WC_DEVAD, - MDIO_WC_REG_IEEE0BLK_MIICNTL, 0x1); + bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, &val16); + bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD, + MDIO_WC_REG_IEEE0BLK_MIICNTL, val16 | 0x1); } } - -static void bnx2x_sync_link(struct link_params *params, - struct link_vars *vars) +void bnx2x_sync_link(struct link_params *params, + struct link_vars *vars) { struct bnx2x *bp = params->bp; u8 link_10g_plus; @@ -4671,7 +4719,7 @@ static void bnx2x_sync_link(struct link_params *params, USES_WARPCORE(bp) && (vars->line_speed == SPEED_1000)) vars->phy_flags |= PHY_SGMII_FLAG; - /* Anything 10 and over uses the bmac */ + /* anything 10 and over uses the bmac */ link_10g_plus = (vars->line_speed >= SPEED_10000); if (link_10g_plus) { @@ -4685,7 +4733,7 @@ static void bnx2x_sync_link(struct link_params *params, else vars->mac_type = MAC_TYPE_EMAC; } - } else { /* Link down */ + } else { /* link down */ DP(NETIF_MSG_LINK, "phy link down\n"); vars->phy_link_up = 0; @@ -4694,12 +4742,10 @@ static void bnx2x_sync_link(struct link_params *params, vars->duplex = DUPLEX_FULL; vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - /* Indicate no mac active */ + /* indicate no mac active */ vars->mac_type = MAC_TYPE_NONE; if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG) vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; - if (vars->link_status & LINK_STATUS_SFP_TX_FAULT) - vars->phy_flags |= PHY_SFP_TX_FAULT_FLAG; } } @@ -4765,7 +4811,7 @@ static void bnx2x_set_master_ln(struct link_params *params, PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >> PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT); - /* Set the master_ln for AN */ + /* set the master_ln for AN */ CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_XGXS_BLOCK2, MDIO_XGXS_BLOCK2_TEST_MODE_LANE, @@ -4788,7 +4834,7 @@ static int bnx2x_reset_unicore(struct link_params *params, MDIO_REG_BANK_COMBO_IEEE0, MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control); - /* Reset the unicore */ + /* reset the unicore */ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_COMBO_IEEE0, MDIO_COMBO_IEEE0_MII_CONTROL, @@ -4797,11 +4843,11 @@ static int bnx2x_reset_unicore(struct link_params *params, if (set_serdes) bnx2x_set_serdes_access(bp, params->port); - /* Wait for the reset to self clear */ + /* wait for the reset to self clear */ for (i = 0; i < 
MDIO_ACCESS_TIMEOUT; i++) { udelay(5); - /* The reset erased the previous bank value */ + /* the reset erased the previous bank value */ CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_COMBO_IEEE0, MDIO_COMBO_IEEE0_MII_CONTROL, @@ -5019,7 +5065,7 @@ static void bnx2x_set_autoneg(struct bnx2x_phy *phy, MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val); } -/* Program SerDes, forced speed */ +/* program SerDes, forced speed */ static void bnx2x_program_serdes(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) @@ -5027,7 +5073,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u16 reg_val; - /* Program duplex, disable autoneg and sgmii*/ + /* program duplex, disable autoneg and sgmii*/ CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_COMBO_IEEE0, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val); @@ -5046,7 +5092,7 @@ static void bnx2x_program_serdes(struct bnx2x_phy *phy, CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_SERDES_DIGITAL, MDIO_SERDES_DIGITAL_MISC1, &reg_val); - /* Clearing the speed value before setting the right speed */ + /* clearing the speed value before setting the right speed */ DP(NETIF_MSG_LINK, "MDIO_REG_BANK_SERDES_DIGITAL = 0x%x\n", reg_val); reg_val &= ~(MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK | @@ -5075,7 +5121,7 @@ static void bnx2x_set_brcm_cl37_advertisement(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u16 val = 0; - /* Set extended capabilities */ + /* set extended capabilities */ if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G) val |= MDIO_OVER_1G_UP1_2_5G; if (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) @@ -5095,7 +5141,7 @@ static void bnx2x_set_ieee_aneg_advertisement(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; u16 val; - /* For AN, we are always publishing full duplex */ + /* for AN, we are always publishing full duplex */ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_COMBO_IEEE0, @@ -5157,14 +5203,14 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u16 control1; - /* In SGMII mode, the unicore is always slave */ + /* in SGMII mode, the unicore is always slave */ CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_SERDES_DIGITAL, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &control1); control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT; - /* Set sgmii mode (and not fiber) */ + /* set sgmii mode (and not fiber) */ control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE | MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET | MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE); @@ -5173,9 +5219,9 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, control1); - /* If forced speed */ + /* if forced speed */ if (!(vars->line_speed == SPEED_AUTO_NEG)) { - /* Set speed, disable autoneg */ + /* set speed, disable autoneg */ u16 mii_control; CL22_RD_OVER_CL45(bp, phy, @@ -5196,16 +5242,16 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000; break; case SPEED_10: - /* There is nothing to set for 10M */ + /* there is nothing to set for 10M */ break; default: - /* Invalid speed for SGMII */ + /* invalid speed for SGMII */ DP(NETIF_MSG_LINK, "Invalid line_speed 0x%x\n", vars->line_speed); break; } - /* Setting the full duplex */ + /* setting the full duplex */ if (phy->req_duplex == DUPLEX_FULL) mii_control |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX; @@ -5215,7 +5261,7 @@ static void bnx2x_initialize_sgmii_process(struct bnx2x_phy *phy, 
mii_control); } else { /* AN mode */ - /* Enable and restart AN */ + /* enable and restart AN */ bnx2x_restart_autoneg(phy, params, 0); } } @@ -5311,7 +5357,7 @@ static void bnx2x_flow_ctrl_resolve(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; - /* Resolve from gp_status in case of AN complete and not sgmii */ + /* resolve from gp_status in case of AN complete and not sgmii */ if (phy->req_flow_ctrl != BNX2X_FLOW_CTRL_AUTO) { /* Update the advertised flow-controled of LD/LP in AN */ if (phy->req_line_speed == SPEED_AUTO_NEG) @@ -5535,7 +5581,7 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy, bnx2x_xgxs_an_resolve(phy, params, vars, gp_status); } - } else { /* Link_down */ + } else { /* link_down */ if ((phy->req_line_speed == SPEED_AUTO_NEG) && SINGLE_MEDIA_DIRECT(params)) { /* Check signal is detected */ @@ -5684,12 +5730,12 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params) u16 tx_driver; u16 bank; - /* Read precomp */ + /* read precomp */ CL22_RD_OVER_CL45(bp, phy, MDIO_REG_BANK_OVER_1G, MDIO_OVER_1G_LP_UP2, &lp_up2); - /* Bits [10:7] at lp_up2, positioned at [15:12] */ + /* bits [10:7] at lp_up2, positioned at [15:12] */ lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >> MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) << MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT); @@ -5703,7 +5749,7 @@ static void bnx2x_set_gmii_tx_driver(struct link_params *params) bank, MDIO_TX0_TX_DRIVER, &tx_driver); - /* Replace tx_driver bits [15:12] */ + /* replace tx_driver bits [15:12] */ if (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK)) { tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK; @@ -5799,16 +5845,16 @@ static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy, FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED)) bnx2x_set_preemphasis(phy, params); - /* Forced speed requested? */ + /* forced speed requested? 
*/ if (vars->line_speed != SPEED_AUTO_NEG || (SINGLE_MEDIA_DIRECT(params) && params->loopback_mode == LOOPBACK_EXT)) { DP(NETIF_MSG_LINK, "not SGMII, no AN\n"); - /* Disable autoneg */ + /* disable autoneg */ bnx2x_set_autoneg(phy, params, vars, 0); - /* Program speed and duplex */ + /* program speed and duplex */ bnx2x_program_serdes(phy, params, vars); } else { /* AN_mode */ @@ -5817,14 +5863,14 @@ static void bnx2x_xgxs_config_init(struct bnx2x_phy *phy, /* AN enabled */ bnx2x_set_brcm_cl37_advertisement(phy, params); - /* Program duplex & pause advertisement (for aneg) */ + /* program duplex & pause advertisement (for aneg) */ bnx2x_set_ieee_aneg_advertisement(phy, params, vars->ieee_fc); - /* Enable autoneg */ + /* enable autoneg */ bnx2x_set_autoneg(phy, params, vars, enable_cl73); - /* Enable and restart AN */ + /* enable and restart AN */ bnx2x_restart_autoneg(phy, params, enable_cl73); } @@ -5860,12 +5906,12 @@ static int bnx2x_prepare_xgxs(struct bnx2x_phy *phy, bnx2x_set_master_ln(params, phy); rc = bnx2x_reset_unicore(params, phy, 0); - /* Reset the SerDes and wait for reset bit return low */ - if (rc) + /* reset the SerDes and wait for reset bit return low */ + if (rc != 0) return rc; bnx2x_set_aer_mmd(params, phy); - /* Setting the masterLn_def again after the reset */ + /* setting the masterLn_def again after the reset */ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) { bnx2x_set_master_ln(params, phy); bnx2x_set_swap_lanes(params, phy); @@ -5890,7 +5936,7 @@ static u16 bnx2x_wait_reset_complete(struct bnx2x *bp, MDIO_PMA_REG_CTRL, &ctrl); if (!(ctrl & (1<<15))) break; - usleep_range(1000, 2000); + msleep(1); } if (cnt == 1000) @@ -6121,7 +6167,7 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n"); if (!CHIP_IS_E3(bp)) { - /* Change the uni_phy_addr in the nig */ + /* change the uni_phy_addr in the nig */ md_devad = REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18)); @@ -6141,11 +6187,11 @@ static void bnx2x_set_xgxs_loopback(struct bnx2x_phy *phy, (MDIO_CL73_IEEEB0_CL73_AN_CONTROL & 0xf)), 0x6041); msleep(200); - /* Set aer mmd back */ + /* set aer mmd back */ bnx2x_set_aer_mmd(params, phy); if (!CHIP_IS_E3(bp)) { - /* And md_devad */ + /* and md_devad */ REG_WR(bp, NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad); } @@ -6342,7 +6388,7 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars, MDIO_REG_BANK_GP_STATUS, MDIO_GP_STATUS_TOP_AN_STATUS1, &gp_status); - /* Link is up only if both local phy and external phy are up */ + /* link is up only if both local phy and external phy are up */ if (!(gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS)) return -ESRCH; } @@ -6363,9 +6409,7 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars, for (phy_index = EXT_PHY1; phy_index < params->num_phys; phy_index++) { serdes_phy_type = ((params->phy[phy_index].media_type == - ETH_PHY_SFPP_10G_FIBER) || - (params->phy[phy_index].media_type == - ETH_PHY_SFP_1G_FIBER) || + ETH_PHY_SFP_FIBER) || (params->phy[phy_index].media_type == ETH_PHY_XFP_FIBER) || (params->phy[phy_index].media_type == @@ -6466,7 +6510,7 @@ static int bnx2x_link_initialize(struct link_params *params, static void bnx2x_int_link_reset(struct bnx2x_phy *phy, struct link_params *params) { - /* Reset the SerDes/XGXS */ + /* reset the SerDes/XGXS */ REG_WR(params->bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, (0x1ff << (params->port*16))); } @@ -6499,10 +6543,10 @@ static int bnx2x_update_link_down(struct 
link_params *params, DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port); bnx2x_set_led(params, vars, LED_MODE_OFF, 0); vars->phy_flags &= ~PHY_PHYSICAL_LINK_FLAG; - /* Indicate no mac active */ + /* indicate no mac active */ vars->mac_type = MAC_TYPE_NONE; - /* Update shared memory */ + /* update shared memory */ vars->link_status &= ~(LINK_STATUS_SPEED_AND_DUPLEX_MASK | LINK_STATUS_LINK_UP | LINK_STATUS_PHYSICAL_LINK_FLAG | @@ -6515,15 +6559,15 @@ static int bnx2x_update_link_down(struct link_params *params, vars->line_speed = 0; bnx2x_update_mng(params, vars->link_status); - /* Activate nig drain */ + /* activate nig drain */ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); - /* Disable emac */ + /* disable emac */ if (!CHIP_IS_E3(bp)) REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); - usleep_range(10000, 20000); - /* Reset BigMac/Xmac */ + msleep(10); + /* reset BigMac/Xmac */ if (CHIP_IS_E1x(bp) || CHIP_IS_E2(bp)) { bnx2x_bmac_rx_disable(bp, params->port); @@ -6532,7 +6576,6 @@ static int bnx2x_update_link_down(struct link_params *params, (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); } if (CHIP_IS_E3(bp)) { - /* Prevent LPI Generation by chip */ REG_WR(bp, MISC_REG_CPMU_LP_FW_ENABLE_P0 + (params->port << 2), 0); REG_WR(bp, MISC_REG_CPMU_LP_DR_ENABLE, 0); @@ -6623,10 +6666,10 @@ static int bnx2x_update_link_up(struct link_params *params, rc |= bnx2x_pbf_update(params, vars->flow_ctrl, vars->line_speed); - /* Disable drain */ + /* disable drain */ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0); - /* Update shared memory */ + /* update shared memory */ bnx2x_update_mng(params, vars->link_status); bnx2x_update_mng_eee(params, vars->eee_status); /* Check remote fault */ @@ -6694,7 +6737,7 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68), REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)); - /* Disable emac */ + /* disable emac */ if (!CHIP_IS_E3(bp)) REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); @@ -6839,11 +6882,11 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars) } else if (prev_line_speed != vars->line_speed) { REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 0); - usleep_range(1000, 2000); + msleep(1); } } - /* Anything 10 and over uses the bmac */ + /* anything 10 and over uses the bmac */ link_10g_plus = (vars->line_speed >= SPEED_10000); bnx2x_link_int_ack(params, vars, link_10g_plus); @@ -6909,7 +6952,7 @@ void bnx2x_ext_phy_hw_reset(struct bnx2x *bp, u8 port) { bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, MISC_REGISTERS_GPIO_OUTPUT_LOW, port); - usleep_range(1000, 2000); + msleep(1); bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); } @@ -7006,7 +7049,7 @@ static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, MDIO_PMA_REG_GEN_CTRL, 0x0001); - /* Ucode reboot and rst */ + /* ucode reboot and rst */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_GEN_CTRL, @@ -7050,7 +7093,7 @@ static int bnx2x_8073_8727_external_rom_boot(struct bnx2x *bp, MDIO_PMA_DEVAD, MDIO_PMA_REG_M8051_MSGOUT_REG, &fw_msgout); - usleep_range(1000, 2000); + msleep(1); } while (fw_ver1 == 0 || fw_ver1 == 0x4321 || ((fw_msgout & 0xff) != 0x03 && (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))); @@ -7144,11 +7187,11 @@ static int bnx2x_8073_xaui_wa(struct bnx2x *bp, struct bnx2x_phy *phy) "XAUI workaround has completed\n"); return 0; } - usleep_range(3000, 6000); + msleep(3); } break; } - usleep_range(3000, 6000); + msleep(3); } 
DP(NETIF_MSG_LINK, "Warning: XAUI work-around timeout !!!\n"); return -EINVAL; @@ -7222,7 +7265,7 @@ static int bnx2x_8073_config_init(struct bnx2x_phy *phy, bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, MISC_REGISTERS_GPIO_OUTPUT_HIGH, gpio_port); - /* Enable LASI */ + /* enable LASI */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, (1<<2)); bnx2x_cl45_write(bp, phy, @@ -7370,7 +7413,7 @@ static u8 bnx2x_8073_read_status(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "8703 LASI status 0x%x\n", val1); - /* Clear the interrupt LASI status register */ + /* clear the interrupt LASI status register */ bnx2x_cl45_read(bp, phy, MDIO_PCS_DEVAD, MDIO_PCS_REG_STATUS, &val2); bnx2x_cl45_read(bp, phy, @@ -7695,7 +7738,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u16 val = 0; u16 i; - if (byte_cnt > SFP_EEPROM_PAGE_SIZE) { + if (byte_cnt > 16) { DP(NETIF_MSG_LINK, "Reading from eeprom is limited to 0xf\n"); return -EINVAL; @@ -7749,7 +7792,7 @@ static int bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy, if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) return 0; - usleep_range(1000, 2000); + msleep(1); } return -EINVAL; } @@ -7764,8 +7807,7 @@ static int bnx2x_warpcore_read_sfp_module_eeprom(struct bnx2x_phy *phy, u32 data_array[4]; u16 addr32; struct bnx2x *bp = params->bp; - - if (byte_cnt > SFP_EEPROM_PAGE_SIZE) { + if (byte_cnt > 16) { DP(NETIF_MSG_LINK, "Reading from eeprom is limited to 16 bytes\n"); return -EINVAL; @@ -7795,7 +7837,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, struct bnx2x *bp = params->bp; u16 val, i; - if (byte_cnt > SFP_EEPROM_PAGE_SIZE) { + if (byte_cnt > 16) { DP(NETIF_MSG_LINK, "Reading from eeprom is limited to 0xf\n"); return -EINVAL; @@ -7832,7 +7874,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, /* Wait appropriate time for two-wire command to finish before * polling the status register */ - usleep_range(1000, 2000); + msleep(1); /* Wait up to 500us for command complete status */ for (i = 0; i < 100; i++) { @@ -7868,7 +7910,7 @@ static int bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy, if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) return 0; - usleep_range(1000, 2000); + msleep(1); } return -EINVAL; @@ -7878,7 +7920,7 @@ int bnx2x_read_sfp_module_eeprom(struct bnx2x_phy *phy, struct link_params *params, u16 addr, u8 byte_cnt, u8 *o_buf) { - int rc = -EOPNOTSUPP; + int rc = -EINVAL; switch (phy->type) { case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726: rc = bnx2x_8726_read_sfp_module_eeprom(phy, params, addr, @@ -7903,7 +7945,7 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, { struct bnx2x *bp = params->bp; u32 sync_offset = 0, phy_idx, media_types; - u8 val[2], check_limiting_mode = 0; + u8 val, check_limiting_mode = 0; *edc_mode = EDC_MODE_LIMITING; phy->media_type = ETH_PHY_UNSPECIFIED; @@ -7911,13 +7953,13 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, if (bnx2x_read_sfp_module_eeprom(phy, params, SFP_EEPROM_CON_TYPE_ADDR, - 2, - (u8 *)val) != 0) { + 1, + &val) != 0) { DP(NETIF_MSG_LINK, "Failed to read from SFP+ module EEPROM\n"); return -EINVAL; } - switch (val[0]) { + switch (val) { case SFP_EEPROM_CON_TYPE_VAL_COPPER: { u8 copper_module_type; @@ -7955,29 +7997,13 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy, break; } case SFP_EEPROM_CON_TYPE_VAL_LC: + phy->media_type = ETH_PHY_SFP_FIBER; + DP(NETIF_MSG_LINK, 
"Optic module detected\n"); check_limiting_mode = 1; - if ((val[1] & (SFP_EEPROM_COMP_CODE_SR_MASK | - SFP_EEPROM_COMP_CODE_LR_MASK | - SFP_EEPROM_COMP_CODE_LRM_MASK)) == 0) { - DP(NETIF_MSG_LINK, "1G Optic module detected\n"); - phy->media_type = ETH_PHY_SFP_1G_FIBER; - phy->req_line_speed = SPEED_1000; - } else { - int idx, cfg_idx = 0; - DP(NETIF_MSG_LINK, "10G Optic module detected\n"); - for (idx = INT_PHY; idx < MAX_PHYS; idx++) { - if (params->phy[idx].type == phy->type) { - cfg_idx = LINK_CONFIG_IDX(idx); - break; - } - } - phy->media_type = ETH_PHY_SFPP_10G_FIBER; - phy->req_line_speed = params->req_line_speed[cfg_idx]; - } break; default: DP(NETIF_MSG_LINK, "Unable to determine module type 0x%x !!!\n", - val[0]); + val); return -EINVAL; } sync_offset = params->shmem_base + @@ -8063,7 +8089,7 @@ static int bnx2x_verify_sfp_module(struct bnx2x_phy *phy, return 0; } - /* Format the warning message */ + /* format the warning message */ if (bnx2x_read_sfp_module_eeprom(phy, params, SFP_EEPROM_VENDOR_NAME_ADDR, @@ -8109,7 +8135,7 @@ static int bnx2x_wait_for_sfp_module_initialized(struct bnx2x_phy *phy, timeout * 5); return 0; } - usleep_range(5000, 10000); + msleep(5); } return -EINVAL; } @@ -8444,7 +8470,7 @@ int bnx2x_sfp_module_detection(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Failed to get valid module type\n"); return -EINVAL; } else if (bnx2x_verify_sfp_module(phy, params) != 0) { - /* Check SFP+ module compatibility */ + /* check SFP+ module compatibility */ DP(NETIF_MSG_LINK, "Module verification failed!!\n"); rc = -EINVAL; /* Turn on fault module-detected led */ @@ -8507,34 +8533,14 @@ void bnx2x_handle_module_detect_int(struct link_params *params) /* Call the handling function in case module is detected */ if (gpio_val == 0) { - bnx2x_set_mdio_clk(bp, params->chip_id, params->port); - bnx2x_set_aer_mmd(params, phy); - bnx2x_power_sfp_module(params, phy, 1); bnx2x_set_gpio_int(bp, gpio_num, MISC_REGISTERS_GPIO_INT_OUTPUT_CLR, gpio_port); - if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) { + if (bnx2x_wait_for_sfp_module_initialized(phy, params) == 0) bnx2x_sfp_module_detection(phy, params); - if (CHIP_IS_E3(bp)) { - u16 rx_tx_in_reset; - /* In case WC is out of reset, reconfigure the - * link speed while taking into account 1G - * module limitation. - */ - bnx2x_cl45_read(bp, phy, - MDIO_WC_DEVAD, - MDIO_WC_REG_DIGITAL5_MISC6, - &rx_tx_in_reset); - if (!rx_tx_in_reset) { - bnx2x_warpcore_reset_lane(bp, phy, 1); - bnx2x_warpcore_config_sfi(phy, params); - bnx2x_warpcore_reset_lane(bp, phy, 0); - } - } - } else { + else DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); - } } else { u32 val = REG_RD(bp, params->shmem_base + offsetof(struct shmem_region, dev_info. 
@@ -8595,7 +8601,7 @@ static u8 bnx2x_8706_8726_read_status(struct bnx2x_phy *phy, bnx2x_sfp_mask_fault(bp, phy, MDIO_PMA_LASI_TXSTAT, MDIO_PMA_LASI_TXCTRL); - /* Clear LASI indication*/ + /* clear LASI indication*/ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_STAT, &val1); bnx2x_cl45_read(bp, phy, @@ -8663,7 +8669,7 @@ static u8 bnx2x_8706_config_init(struct bnx2x_phy *phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_ROM_VER1, &val); if (val) break; - usleep_range(10000, 20000); + msleep(10); } DP(NETIF_MSG_LINK, "XGXS 8706 is initialized after %d ms\n", cnt); if ((params->feature_config_flags & @@ -8792,7 +8798,7 @@ static void bnx2x_8726_external_rom_boot(struct bnx2x_phy *phy, MDIO_PMA_REG_GEN_CTRL, MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); - /* Wait for 150ms for microcode load */ + /* wait for 150ms for microcode load */ msleep(150); /* Disable serial boot control, tristates pins SS_N, SCK, MOSI, MISO */ @@ -8986,63 +8992,6 @@ static void bnx2x_8727_hw_reset(struct bnx2x_phy *phy, MISC_REGISTERS_GPIO_OUTPUT_LOW, port); } -static void bnx2x_8727_config_speed(struct bnx2x_phy *phy, - struct link_params *params) -{ - struct bnx2x *bp = params->bp; - u16 tmp1, val; - /* Set option 1G speed */ - if ((phy->req_line_speed == SPEED_1000) || - (phy->media_type == ETH_PHY_SFP_1G_FIBER)) { - DP(NETIF_MSG_LINK, "Setting 1G force\n"); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD); - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1); - DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1); - /* Power down the XAUI until link is up in case of dual-media - * and 1G - */ - if (DUAL_MEDIA(params)) { - bnx2x_cl45_read(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8727_PCS_GP, &val); - val |= (3<<10); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, - MDIO_PMA_REG_8727_PCS_GP, val); - } - } else if ((phy->req_line_speed == SPEED_AUTO_NEG) && - ((phy->speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) && - ((phy->speed_cap_mask & - PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) != - PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { - - DP(NETIF_MSG_LINK, "Setting 1G clause37\n"); - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0); - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300); - } else { - /* Since the 8727 has only single reset pin, need to set the 10G - * registers although it is default - */ - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, - 0x0020); - bnx2x_cl45_write(bp, phy, - MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040); - bnx2x_cl45_write(bp, phy, - MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, - 0x0008); - } -} - static int bnx2x_8727_config_init(struct bnx2x_phy *phy, struct link_params *params, struct link_vars *vars) @@ -9060,7 +9009,7 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, lasi_ctrl_val = 0x0006; DP(NETIF_MSG_LINK, "Initializing BCM8727\n"); - /* Enable LASI */ + /* enable LASI */ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXCTRL, rx_alarm_ctrl_val); @@ -9112,7 +9061,56 @@ static int bnx2x_8727_config_init(struct bnx2x_phy *phy, bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_LASI_RXSTAT, &tmp1); - bnx2x_8727_config_speed(phy, params); + /* Set option 1G speed */ + if (phy->req_line_speed == SPEED_1000) { + DP(NETIF_MSG_LINK, "Setting 1G force\n"); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x40); + 
bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, 0xD); + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, &tmp1); + DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1); + /* Power down the XAUI until link is up in case of dual-media + * and 1G + */ + if (DUAL_MEDIA(params)) { + bnx2x_cl45_read(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_GP, &val); + val |= (3<<10); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, + MDIO_PMA_REG_8727_PCS_GP, val); + } + } else if ((phy->req_line_speed == SPEED_AUTO_NEG) && + ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) && + ((phy->speed_cap_mask & + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) != + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) { + + DP(NETIF_MSG_LINK, "Setting 1G clause37\n"); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, 0); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x1300); + } else { + /* Since the 8727 has only single reset pin, need to set the 10G + * registers although it is default + */ + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_8727_MISC_CTRL, + 0x0020); + bnx2x_cl45_write(bp, phy, + MDIO_AN_DEVAD, MDIO_AN_REG_CL37_AN, 0x0100); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 0x2040); + bnx2x_cl45_write(bp, phy, + MDIO_PMA_DEVAD, MDIO_PMA_REG_10G_CTRL2, + 0x0008); + } + /* Set 2-wire transfer rate of SFP+ module EEPROM * to 100Khz since some DACs(direct attached cables) do * not work at 400Khz. @@ -9239,9 +9237,6 @@ static void bnx2x_8727_handle_mod_abs(struct bnx2x_phy *phy, bnx2x_sfp_module_detection(phy, params); else DP(NETIF_MSG_LINK, "SFP+ module is not initialized\n"); - - /* Reconfigure link speed based on module type limitations */ - bnx2x_8727_config_speed(phy, params); } DP(NETIF_MSG_LINK, "8727 RX_ALARM_STATUS 0x%x\n", @@ -9736,7 +9731,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, MDIO_84833_CMD_HDLR_STATUS, &val); if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS) break; - usleep_range(1000, 2000); + msleep(1); } if (idx >= PHY84833_CMDHDLR_WAIT) { DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n"); @@ -9757,7 +9752,7 @@ static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy, if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) || (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) break; - usleep_range(1000, 2000); + msleep(1); } if ((idx >= PHY84833_CMDHDLR_WAIT) || (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) { @@ -9927,7 +9922,7 @@ static int bnx2x_8483x_disable_eee(struct bnx2x_phy *phy, /* Prevent Phy from working in EEE and advertising it */ rc = bnx2x_84833_cmd_hdlr(phy, params, PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); - if (rc) { + if (rc != 0) { DP(NETIF_MSG_LINK, "EEE disable failed.\n"); return rc; } @@ -9950,7 +9945,7 @@ static int bnx2x_8483x_enable_eee(struct bnx2x_phy *phy, rc = bnx2x_84833_cmd_hdlr(phy, params, PHY84833_CMD_SET_EEE_MODE, &cmd_args, 1); - if (rc) { + if (rc != 0) { DP(NETIF_MSG_LINK, "EEE enable failed.\n"); return rc; } @@ -9978,7 +9973,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS]; int rc = 0; - usleep_range(1000, 2000); + msleep(1); if (!(CHIP_IS_E1(bp))) port = BP_PATH(bp); @@ -10067,7 +10062,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, rc = bnx2x_84833_cmd_hdlr(phy, params, PHY84833_CMD_SET_EEE_MODE, cmd_args, PHY84833_CMDHDLR_MAX_ARGS); - if (rc) + if (rc != 0) DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n"); } if (initialize) @@ -10111,7 +10106,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, 
vars->eee_status &= ~SHMEM_EEE_REQUESTED_BIT; rc = bnx2x_8483x_eee_timers(params, vars); - if (rc) { + if (rc != 0) { DP(NETIF_MSG_LINK, "Failed to configure EEE timers\n"); bnx2x_8483x_disable_eee(phy, params, vars); return rc; @@ -10124,7 +10119,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy, rc = bnx2x_8483x_enable_eee(phy, params, vars); else rc = bnx2x_8483x_disable_eee(phy, params, vars); - if (rc) { + if (rc != 0) { DP(NETIF_MSG_LINK, "Failed to set EEE advertisment\n"); return rc; } @@ -10187,19 +10182,17 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Legacy speed status = 0x%x\n", legacy_status); link_up = ((legacy_status & (1<<11)) == (1<<11)); - legacy_speed = (legacy_status & (3<<9)); - if (legacy_speed == (0<<9)) - vars->line_speed = SPEED_10; - else if (legacy_speed == (1<<9)) - vars->line_speed = SPEED_100; - else if (legacy_speed == (2<<9)) - vars->line_speed = SPEED_1000; - else { /* Should not happen: Treat as link down */ - vars->line_speed = 0; - link_up = 0; - } - if (link_up) { + legacy_speed = (legacy_status & (3<<9)); + if (legacy_speed == (0<<9)) + vars->line_speed = SPEED_10; + else if (legacy_speed == (1<<9)) + vars->line_speed = SPEED_100; + else if (legacy_speed == (2<<9)) + vars->line_speed = SPEED_1000; + else /* Should not happen */ + vars->line_speed = 0; + if (legacy_status & (1<<8)) vars->duplex = DUPLEX_FULL; else @@ -10227,7 +10220,7 @@ static u8 bnx2x_848xx_read_status(struct bnx2x_phy *phy, } } if (link_up) { - DP(NETIF_MSG_LINK, "BCM848x3: link speed is %d\n", + DP(NETIF_MSG_LINK, "BCM84823: link speed is %d\n", vars->line_speed); bnx2x_ext_phy_resolve_fc(phy, params, vars); @@ -10569,7 +10562,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, u32 cfg_pin; DP(NETIF_MSG_LINK, "54618SE cfg init\n"); - usleep_range(1000, 2000); + usleep_range(1000, 1000); /* This works with E3 only, no need to check the chip * before determining the port. 
@@ -10638,7 +10631,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) fc_val |= MDIO_AN_REG_ADV_PAUSE_PAUSE; - /* Read all advertisement */ + /* read all advertisement */ bnx2x_cl22_read(bp, phy, 0x09, &an_1000_val); @@ -10675,7 +10668,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, 0x09, &an_1000_val); - /* Set 100 speed advertisement */ + /* set 100 speed advertisement */ if (((phy->req_line_speed == SPEED_AUTO_NEG) && (phy->speed_cap_mask & (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL | @@ -10689,7 +10682,7 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "Advertising 100M\n"); } - /* Set 10 speed advertisement */ + /* set 10 speed advertisement */ if (((phy->req_line_speed == SPEED_AUTO_NEG) && (phy->speed_cap_mask & (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL | @@ -10828,7 +10821,7 @@ static u8 bnx2x_54618se_read_status(struct bnx2x_phy *phy, /* Get speed operation status */ bnx2x_cl22_read(bp, phy, - MDIO_REG_GPHY_AUX_STATUS, + 0x19, &legacy_status); DP(NETIF_MSG_LINK, "54618SE read_status: 0x%x\n", legacy_status); @@ -11055,7 +11048,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy, DP(NETIF_MSG_LINK, "10G-base-T PMA status 0x%x->0x%x\n", val2, val1); link_up = ((val1 & 4) == 4); - /* If link is up print the AN outcome of the SFX7101 PHY */ + /* if link is up print the AN outcome of the SFX7101 PHY */ if (link_up) { bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_MASTER_STATUS, @@ -11067,7 +11060,7 @@ static u8 bnx2x_7101_read_status(struct bnx2x_phy *phy, bnx2x_ext_phy_10G_an_resolve(bp, phy, vars); bnx2x_ext_phy_resolve_fc(phy, params, vars); - /* Read LP advertised speeds */ + /* read LP advertised speeds */ if (val2 & (1<<11)) vars->link_status |= LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE; @@ -11386,7 +11379,7 @@ static struct bnx2x_phy phy_8706 = { SUPPORTED_FIBRE | SUPPORTED_Pause | SUPPORTED_Asym_Pause), - .media_type = ETH_PHY_SFPP_10G_FIBER, + .media_type = ETH_PHY_SFP_FIBER, .ver_addr = 0, .req_flow_ctrl = 0, .req_line_speed = 0, @@ -11725,7 +11718,7 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port, SUPPORTED_FIBRE | SUPPORTED_Pause | SUPPORTED_Asym_Pause); - phy->media_type = ETH_PHY_SFPP_10G_FIBER; + phy->media_type = ETH_PHY_SFP_FIBER; break; case PORT_HW_CFG_NET_SERDES_IF_KR: phy->media_type = ETH_PHY_KR; @@ -12265,7 +12258,7 @@ int bnx2x_phy_init(struct link_params *params, struct link_vars *vars) vars->mac_type = MAC_TYPE_NONE; vars->phy_flags = 0; - /* Disable attentions */ + /* disable attentions */ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + params->port*4, (NIG_MASK_XGXS0_LINK_STATUS | NIG_MASK_XGXS0_LINK10G | @@ -12325,7 +12318,7 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, struct bnx2x *bp = params->bp; u8 phy_index, port = params->port, clear_latch_ind = 0; DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port); - /* Disable attentions */ + /* disable attentions */ vars->link_status = 0; bnx2x_update_mng(params, vars->link_status); vars->eee_status &= ~(SHMEM_EEE_LP_ADV_STATUS_MASK | @@ -12337,10 +12330,10 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, NIG_MASK_SERDES0_LINK_STATUS | NIG_MASK_MI_INT)); - /* Activate nig drain */ + /* activate nig drain */ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1); - /* Disable nig egress interface */ + /* disable nig egress interface */ if (!CHIP_IS_E3(bp)) { REG_WR(bp, NIG_REG_BMAC0_OUT_EN + port*4, 0); 
REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0); @@ -12353,15 +12346,15 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, bnx2x_xmac_disable(params); bnx2x_umac_disable(params); } - /* Disable emac */ + /* disable emac */ if (!CHIP_IS_E3(bp)) REG_WR(bp, NIG_REG_NIG_EMAC0_EN + port*4, 0); - usleep_range(10000, 20000); + msleep(10); /* The PHY reset is controlled by GPIO 1 * Hold it as vars low */ - /* Clear link led */ + /* clear link led */ bnx2x_set_mdio_clk(bp, params->chip_id, port); bnx2x_set_led(params, vars, LED_MODE_OFF, 0); @@ -12391,9 +12384,9 @@ int bnx2x_link_reset(struct link_params *params, struct link_vars *vars, params->phy[INT_PHY].link_reset( &params->phy[INT_PHY], params); - /* Disable nig ingress interface */ + /* disable nig ingress interface */ if (!CHIP_IS_E3(bp)) { - /* Reset BigMac */ + /* reset BigMac */ REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)); REG_WR(bp, NIG_REG_BMAC0_IN_EN + port*4, 0); @@ -12450,7 +12443,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp, DP(NETIF_MSG_LINK, "populate_phy failed\n"); return -EINVAL; } - /* Disable attentions */ + /* disable attentions */ bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port_of_path*4, (NIG_MASK_XGXS0_LINK_STATUS | @@ -12524,7 +12517,7 @@ static int bnx2x_8073_common_init_phy(struct bnx2x *bp, bnx2x_cl45_write(bp, phy_blk[port], MDIO_PMA_DEVAD, MDIO_PMA_REG_TX_POWER_DOWN, (val & (~(1<<10)))); - usleep_range(15000, 30000); + msleep(15); /* Read modify write the SPI-ROM version select register */ bnx2x_cl45_read(bp, phy_blk[port], @@ -12556,7 +12549,7 @@ static int bnx2x_8726_common_init_phy(struct bnx2x *bp, REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val); bnx2x_ext_phy_hw_reset(bp, 0); - usleep_range(5000, 10000); + msleep(5); for (port = 0; port < PORT_MAX; port++) { u32 shmem_base, shmem2_base; @@ -12663,11 +12656,11 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp, /* Initiate PHY reset*/ bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_LOW, port); - usleep_range(1000, 2000); + msleep(1); bnx2x_set_gpio(bp, reset_gpio, MISC_REGISTERS_GPIO_OUTPUT_HIGH, port); - usleep_range(5000, 10000); + msleep(5); /* PART1 - Reset both phys */ for (port = PORT_MAX - 1; port >= PORT_0; port--) { @@ -12761,7 +12754,7 @@ static int bnx2x_84833_pre_init_phy(struct bnx2x *bp, MDIO_PMA_REG_CTRL, &val); if (!(val & (1<<15))) break; - usleep_range(1000, 2000); + msleep(1); } if (cnt >= 1500) { DP(NETIF_MSG_LINK, "84833 reset timeout\n"); @@ -12851,7 +12844,7 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[], break; } - if (rc) + if (rc != 0) netdev_err(bp->dev, "Warning: PHY was not initialized," " Port %d\n", 0); @@ -12932,41 +12925,30 @@ static void bnx2x_check_over_curr(struct link_params *params, vars->phy_flags &= ~PHY_OVER_CURRENT_FLAG; } -/* Returns 0 if no change occured since last check; 1 otherwise. */ -static u8 bnx2x_analyze_link_error(struct link_params *params, - struct link_vars *vars, u32 status, - u32 phy_flag, u32 link_flag, u8 notify) +static void bnx2x_analyze_link_error(struct link_params *params, + struct link_vars *vars, u32 lss_status, + u8 notify) { struct bnx2x *bp = params->bp; /* Compare new value with previous value */ u8 led_mode; - u32 old_status = (vars->phy_flags & phy_flag) ? 
1 : 0; + u32 half_open_conn = (vars->phy_flags & PHY_HALF_OPEN_CONN_FLAG) > 0; - if ((status ^ old_status) == 0) - return 0; + if ((lss_status ^ half_open_conn) == 0) + return; /* If values differ */ - switch (phy_flag) { - case PHY_HALF_OPEN_CONN_FLAG: - DP(NETIF_MSG_LINK, "Analyze Remote Fault\n"); - break; - case PHY_SFP_TX_FAULT_FLAG: - DP(NETIF_MSG_LINK, "Analyze TX Fault\n"); - break; - default: - DP(NETIF_MSG_LINK, "Analyze UNKOWN\n"); - } - DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up, - old_status, status); + DP(NETIF_MSG_LINK, "Link changed:%x %x->%x\n", vars->link_up, + half_open_conn, lss_status); /* a. Update shmem->link_status accordingly * b. Update link_vars->link_up */ - if (status) { + if (lss_status) { + DP(NETIF_MSG_LINK, "Remote Fault detected !!!\n"); vars->link_status &= ~LINK_STATUS_LINK_UP; - vars->link_status |= link_flag; vars->link_up = 0; - vars->phy_flags |= phy_flag; + vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG; /* activate nig drain */ REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + params->port*4, 1); @@ -12975,10 +12957,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params, */ led_mode = LED_MODE_OFF; } else { + DP(NETIF_MSG_LINK, "Remote Fault cleared\n"); vars->link_status |= LINK_STATUS_LINK_UP; - vars->link_status &= ~link_flag; vars->link_up = 1; - vars->phy_flags &= ~phy_flag; + vars->phy_flags &= ~PHY_HALF_OPEN_CONN_FLAG; led_mode = LED_MODE_OPER; /* Clear nig drain */ @@ -12995,8 +12977,6 @@ static u8 bnx2x_analyze_link_error(struct link_params *params, vars->periodic_flags |= PERIODIC_FLAGS_LINK_EVENT; if (notify) bnx2x_notify_link_changed(bp); - - return 1; } /****************************************************************************** @@ -13038,9 +13018,7 @@ int bnx2x_check_half_open_conn(struct link_params *params, if (REG_RD(bp, mac_base + XMAC_REG_RX_LSS_STATUS)) lss_status = 1; - bnx2x_analyze_link_error(params, vars, lss_status, - PHY_HALF_OPEN_CONN_FLAG, - LINK_STATUS_NONE, notify); + bnx2x_analyze_link_error(params, vars, lss_status, notify); } else if (REG_RD(bp, MISC_REG_RESET_REG_2) & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) { /* Check E1X / E2 BMAC */ @@ -13057,55 +13035,11 @@ int bnx2x_check_half_open_conn(struct link_params *params, REG_RD_DMAE(bp, mac_base + lss_status_reg, wb_data, 2); lss_status = (wb_data[0] > 0); - bnx2x_analyze_link_error(params, vars, lss_status, - PHY_HALF_OPEN_CONN_FLAG, - LINK_STATUS_NONE, notify); + bnx2x_analyze_link_error(params, vars, lss_status, notify); } return 0; } -static void bnx2x_sfp_tx_fault_detection(struct bnx2x_phy *phy, - struct link_params *params, - struct link_vars *vars) -{ - struct bnx2x *bp = params->bp; - u32 cfg_pin, value = 0; - u8 led_change, port = params->port; - - /* Get The SFP+ TX_Fault controlling pin ([eg]pio) */ - cfg_pin = (REG_RD(bp, params->shmem_base + offsetof(struct shmem_region, - dev_info.port_hw_config[port].e3_cmn_pin_cfg)) & - PORT_HW_CFG_E3_TX_FAULT_MASK) >> - PORT_HW_CFG_E3_TX_FAULT_SHIFT; - if (bnx2x_get_cfg_pin(bp, cfg_pin, &value)) { - DP(NETIF_MSG_LINK, "Failed to read pin 0x%02x\n", cfg_pin); - return; - } - - led_change = bnx2x_analyze_link_error(params, vars, value, - PHY_SFP_TX_FAULT_FLAG, - LINK_STATUS_SFP_TX_FAULT, 1); - - if (led_change) { - /* Change TX_Fault led, set link status for further syncs */ - u8 led_mode; - - if (vars->phy_flags & PHY_SFP_TX_FAULT_FLAG) { - led_mode = MISC_REGISTERS_GPIO_HIGH; - vars->link_status |= LINK_STATUS_SFP_TX_FAULT; - } else { - led_mode = MISC_REGISTERS_GPIO_LOW; - 
vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT; - } - - /* If module is unapproved, led should be on regardless */ - if (!(phy->flags & FLAGS_SFP_NOT_APPROVED)) { - DP(NETIF_MSG_LINK, "Change TX_Fault LED: ->%x\n", - led_mode); - bnx2x_set_e3_module_fault_led(params, led_mode); - } - } -} void bnx2x_period_func(struct link_params *params, struct link_vars *vars) { u16 phy_idx; @@ -13124,26 +13058,7 @@ void bnx2x_period_func(struct link_params *params, struct link_vars *vars) struct bnx2x_phy *phy = &params->phy[INT_PHY]; bnx2x_set_aer_mmd(params, phy); bnx2x_check_over_curr(params, vars); - if (vars->rx_tx_asic_rst) - bnx2x_warpcore_config_runtime(phy, params, vars); - - if ((REG_RD(bp, params->shmem_base + - offsetof(struct shmem_region, dev_info. - port_hw_config[params->port].default_cfg)) - & PORT_HW_CFG_NET_SERDES_IF_MASK) == - PORT_HW_CFG_NET_SERDES_IF_SFI) { - if (bnx2x_is_sfp_module_plugged(phy, params)) { - bnx2x_sfp_tx_fault_detection(phy, params, vars); - } else if (vars->link_status & - LINK_STATUS_SFP_TX_FAULT) { - /* Clean trail, interrupt corrects the leds */ - vars->link_status &= ~LINK_STATUS_SFP_TX_FAULT; - vars->phy_flags &= ~PHY_SFP_TX_FAULT_FLAG; - /* Update link status in the shared memory */ - bnx2x_update_mng(params, vars->link_status); - } - } - + bnx2x_warpcore_config_runtime(phy, params, vars); } } diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h index c05f9d94938f..e920800a7bc5 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h @@ -41,7 +41,6 @@ #define SPEED_AUTO_NEG 0 #define SPEED_20000 20000 -#define SFP_EEPROM_PAGE_SIZE 16 #define SFP_EEPROM_VENDOR_NAME_ADDR 0x14 #define SFP_EEPROM_VENDOR_NAME_SIZE 16 #define SFP_EEPROM_VENDOR_OUI_ADDR 0x25 @@ -126,11 +125,6 @@ typedef void (*set_link_led_t)(struct bnx2x_phy *phy, struct link_params *params, u8 mode); typedef void (*phy_specific_func_t)(struct bnx2x_phy *phy, struct link_params *params, u32 action); -struct bnx2x_reg_set { - u8 devad; - u16 reg; - u16 val; -}; struct bnx2x_phy { u32 type; @@ -169,15 +163,14 @@ struct bnx2x_phy { u32 supported; u32 media_type; -#define ETH_PHY_UNSPECIFIED 0x0 -#define ETH_PHY_SFPP_10G_FIBER 0x1 -#define ETH_PHY_XFP_FIBER 0x2 -#define ETH_PHY_DA_TWINAX 0x3 -#define ETH_PHY_BASE_T 0x4 -#define ETH_PHY_SFP_1G_FIBER 0x5 -#define ETH_PHY_KR 0xf0 -#define ETH_PHY_CX4 0xf1 -#define ETH_PHY_NOT_PRESENT 0xff +#define ETH_PHY_UNSPECIFIED 0x0 +#define ETH_PHY_SFP_FIBER 0x1 +#define ETH_PHY_XFP_FIBER 0x2 +#define ETH_PHY_DA_TWINAX 0x3 +#define ETH_PHY_BASE_T 0x4 +#define ETH_PHY_KR 0xf0 +#define ETH_PHY_CX4 0xf1 +#define ETH_PHY_NOT_PRESENT 0xff /* The address in which version is located*/ u32 ver_addr; @@ -314,7 +307,6 @@ struct link_vars { #define PHY_PHYSICAL_LINK_FLAG (1<<2) #define PHY_HALF_OPEN_CONN_FLAG (1<<3) #define PHY_OVER_CURRENT_FLAG (1<<4) -#define PHY_SFP_TX_FAULT_FLAG (1<<5) u8 mac_type; #define MAC_TYPE_NONE 0 diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 8ddc78e0d945..a622bb7bf21d 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -104,7 +104,7 @@ MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); #define INT_MODE_INTx 1 #define INT_MODE_MSI 2 -int int_mode; +static int int_mode; module_param(int_mode, int, 0); MODULE_PARM_DESC(int_mode, " Force 
interrupt mode other than MSI-X " "(1 INT#x; 2 MSI)"); @@ -758,7 +758,7 @@ void bnx2x_panic_dump(struct bnx2x *bp) /* Tx */ for_each_cos_in_tx_queue(fp, cos) { - txdata = *fp->txdata_ptr[cos]; + txdata = fp->txdata[cos]; BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n", i, txdata.tx_pkt_prod, txdata.tx_pkt_cons, txdata.tx_bd_prod, @@ -876,7 +876,7 @@ void bnx2x_panic_dump(struct bnx2x *bp) for_each_tx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_cos_in_tx_queue(fp, cos) { - struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; + struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245); @@ -1583,7 +1583,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX; - struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj; DP(BNX2X_MSG_SP, "fp %d cid %d got ramrod #%d state is %x type is %d\n", @@ -1710,7 +1710,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) /* Handle Rx or Tx according to SB id */ prefetch(fp->rx_cons_sb); for_each_cos_in_tx_queue(fp, cos) - prefetch(fp->txdata_ptr[cos]->tx_cons_sb); + prefetch(fp->txdata[cos].tx_cons_sb); prefetch(&fp->sb_running_index[SM_RX_ID]); napi_schedule(&bnx2x_fp(bp, fp->index, napi)); status &= ~mask; @@ -2124,11 +2124,6 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) } } - if (load_mode == LOAD_LOOPBACK_EXT) { - struct link_params *lp = &bp->link_params; - lp->loopback_mode = LOOPBACK_EXT; - } - rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); bnx2x_release_phy_lock(bp); @@ -2921,7 +2916,7 @@ static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init, u8 cos) { - txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping; + txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping; txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; txq_init->fw_sb_id = fp->fw_sb_id; @@ -3035,9 +3030,9 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp) memcpy(ether_stat->version, DRV_MODULE_VERSION, ETH_STAT_INFO_VERSION_LEN - 1); - bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj, - DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, - ether_stat->mac_local); + bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj, + DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, + ether_stat->mac_local); ether_stat->mtu_size = bp->dev->mtu; @@ -3068,11 +3063,11 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp) /* insert FCoE stats from ramrod response */ if (!NO_FCOE(bp)) { struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = - &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. + &bp->fw_stats_data->queue_stats[FCOE_IDX]. tstorm_queue_statistics; struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = - &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. + &bp->fw_stats_data->queue_stats[FCOE_IDX]. 
xstorm_queue_statistics; struct fcoe_statistics_params *fw_fcoe_stat = @@ -4628,11 +4623,11 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp, case BNX2X_FILTER_MAC_PENDING: DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); #ifdef BCM_CNIC - if (cid == BNX2X_ISCSI_ETH_CID(bp)) + if (cid == BNX2X_ISCSI_ETH_CID) vlan_mac_obj = &bp->iscsi_l2_mac_obj; else #endif - vlan_mac_obj = &bp->sp_objs[cid].mac_obj; + vlan_mac_obj = &bp->fp[cid].mac_obj; break; case BNX2X_FILTER_MCAST_PENDING: @@ -4730,7 +4725,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp) for_each_eth_queue(bp, q) { /* Set the appropriate Queue object */ fp = &bp->fp[q]; - queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + queue_params.q_obj = &fp->q_obj; /* send the ramrod */ rc = bnx2x_queue_state_change(bp, &queue_params); @@ -4741,8 +4736,8 @@ static void bnx2x_after_function_update(struct bnx2x *bp) #ifdef BCM_CNIC if (!NO_FCOE(bp)) { - fp = &bp->fp[FCOE_IDX(bp)]; - queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + fp = &bp->fp[FCOE_IDX]; + queue_params.q_obj = &fp->q_obj; /* clear pending completion bit */ __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); @@ -4774,11 +4769,11 @@ static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( { DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid); #ifdef BCM_CNIC - if (cid == BNX2X_FCOE_ETH_CID(bp)) - return &bnx2x_fcoe_sp_obj(bp, q_obj); + if (cid == BNX2X_FCOE_ETH_CID) + return &bnx2x_fcoe(bp, q_obj); else #endif - return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj; + return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj); } static void bnx2x_eq_int(struct bnx2x *bp) @@ -5660,15 +5655,15 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) /* init tx data */ for_each_cos_in_tx_queue(fp, cos) { - bnx2x_init_txdata(bp, fp->txdata_ptr[cos], - CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp), - FP_COS_TO_TXQ(fp, cos, bp), - BNX2X_TX_SB_INDEX_BASE + cos, fp); - cids[cos] = fp->txdata_ptr[cos]->cid; + bnx2x_init_txdata(bp, &fp->txdata[cos], + CID_COS_TO_TX_ONLY_CID(fp->cid, cos), + FP_COS_TO_TXQ(fp, cos), + BNX2X_TX_SB_INDEX_BASE + cos); + cids[cos] = fp->txdata[cos].cid; } - bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids, - fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), + bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos, + BP_FUNC(bp), bnx2x_sp(bp, q_rdata), bnx2x_sp_mapping(bp, q_rdata), q_type); /** @@ -5719,7 +5714,7 @@ static void bnx2x_init_tx_rings(struct bnx2x *bp) for_each_tx_queue(bp, i) for_each_cos_in_tx_queue(&bp->fp[i], cos) - bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]); + bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]); } void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) @@ -7068,10 +7063,12 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; for (i = 0; i < L2_ILT_LINES(bp); i++) { - ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt; + ilt->lines[cdu_ilt_start + i].page = + bp->context.vcxt + (ILT_PAGE_CIDS * i); ilt->lines[cdu_ilt_start + i].page_mapping = - bp->context[i].cxt_mapping; - ilt->lines[cdu_ilt_start + i].size = bp->context[i].size; + bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i); + /* cdu ilt pages are allocated manually so there's no need to + set the size */ } bnx2x_ilt_init_op(bp, INITOP_SET); @@ -7338,8 +7335,6 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) void bnx2x_free_mem(struct bnx2x *bp) { - int i; - /* fastpath */ bnx2x_free_fp_mem(bp); /* end of fastpath */ @@ -7353,9 +7348,9 @@ void bnx2x_free_mem(struct bnx2x 
*bp) BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, sizeof(struct bnx2x_slowpath)); - for (i = 0; i < L2_ILT_LINES(bp); i++) - BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping, - bp->context[i].size); + BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping, + bp->context.size); + bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE); BNX2X_FREE(bp->ilt->lines); @@ -7441,8 +7436,6 @@ static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) int bnx2x_alloc_mem(struct bnx2x *bp) { - int i, allocated, context_size; - #ifdef BCM_CNIC if (!CHIP_IS_E1x(bp)) /* size = the status block + ramrod buffers */ @@ -7472,29 +7465,11 @@ int bnx2x_alloc_mem(struct bnx2x *bp) if (bnx2x_alloc_fw_stats_mem(bp)) goto alloc_mem_err; - /* Allocate memory for CDU context: - * This memory is allocated separately and not in the generic ILT - * functions because CDU differs in few aspects: - * 1. There are multiple entities allocating memory for context - - * 'regular' driver, CNIC and SRIOV driver. Each separately controls - * its own ILT lines. - * 2. Since CDU page-size is not a single 4KB page (which is the case - * for the other ILT clients), to be efficient we want to support - * allocation of sub-page-size in the last entry. - * 3. Context pointers are used by the driver to pass to FW / update - * the context (for the other ILT clients the pointers are used just to - * free the memory during unload). - */ - context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp); + bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp); + + BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping, + bp->context.size); - for (i = 0, allocated = 0; allocated < context_size; i++) { - bp->context[i].size = min(CDU_ILT_PAGE_SZ, - (context_size - allocated)); - BNX2X_PCI_ALLOC(bp->context[i].vcxt, - &bp->context[i].cxt_mapping, - bp->context[i].size); - allocated += bp->context[i].size; - } BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES); if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) @@ -7596,8 +7571,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); /* Eth MAC is set on RSS leading client (fp[0]) */ - return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj, - set, BNX2X_ETH_MAC, &ramrod_flags); + return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set, + BNX2X_ETH_MAC, &ramrod_flags); } int bnx2x_setup_leading(struct bnx2x *bp) @@ -7612,7 +7587,7 @@ int bnx2x_setup_leading(struct bnx2x *bp) * * In case of MSI-X it will also try to enable MSI-X. 
*/ -void bnx2x_set_int_mode(struct bnx2x *bp) +static void __devinit bnx2x_set_int_mode(struct bnx2x *bp) { switch (int_mode) { case INT_MODE_MSI: @@ -7623,6 +7598,11 @@ void bnx2x_set_int_mode(struct bnx2x *bp) BNX2X_DEV_INFO("set number of queues to 1\n"); break; default: + /* Set number of queues for MSI-X mode */ + bnx2x_set_num_queues(bp); + + BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); + /* if we can't use MSI-X we only need one fp, * so try to enable MSI-X with the requested number of fp's * and fallback to MSI or legacy INTx with one fp @@ -7763,8 +7743,6 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp, { u8 cos; - int cxt_index, cxt_offset; - /* FCoE Queue uses Default SB, thus has no HC capabilities */ if (!IS_FCOE_FP(fp)) { __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); @@ -7801,13 +7779,9 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp, fp->index, init_params->max_cos); /* set the context pointers queue object */ - for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { - cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS; - cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index * - ILT_PAGE_CIDS); + for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) init_params->cxts[cos] = - &bp->context[cxt_index].vcxt[cxt_offset].eth; - } + &bp->context.vcxt[fp->txdata[cos].cid].eth; } int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, @@ -7872,7 +7846,7 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); - q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + q_params.q_obj = &fp->q_obj; /* We want to wait for completion in this context */ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); @@ -7945,7 +7919,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index) DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid); - q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + q_params.q_obj = &fp->q_obj; /* We want to wait for completion in this context */ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); @@ -7956,7 +7930,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index) tx_index++){ /* ascertain this is a normal queue*/ - txdata = fp->txdata_ptr[tx_index]; + txdata = &fp->txdata[tx_index]; DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n", txdata->txq_index); @@ -8323,7 +8297,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_cos_in_tx_queue(fp, cos) - rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); + rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]); #ifdef BNX2X_STOP_ON_ERROR if (rc) return; @@ -8334,13 +8308,12 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) usleep_range(1000, 1000); /* Clean all ETH MACs */ - rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC, - false); + rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false); if (rc < 0) BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc); /* Clean up UC list */ - rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC, + rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC, true); if (rc < 0) BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n", @@ -9732,8 +9705,6 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? BC_SUPPORTS_PFC_STATS : 0; - bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? 
- BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; boot_mode = SHMEM_RD(bp, dev_info.port_feature_config[BP_PORT(bp)].mba_config) & PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; @@ -11047,7 +11018,7 @@ static int bnx2x_set_uc_list(struct bnx2x *bp) int rc; struct net_device *dev = bp->dev; struct netdev_hw_addr *ha; - struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; + struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; unsigned long ramrod_flags = 0; /* First schedule a cleanup up of old configuration */ @@ -11722,7 +11693,7 @@ void bnx2x__init_func_obj(struct bnx2x *bp) /* must be called after sriov-enable */ static int bnx2x_set_qm_cid_count(struct bnx2x *bp) { - int cid_count = BNX2X_L2_MAX_CID(bp); + int cid_count = BNX2X_L2_CID_COUNT(bp); #ifdef BCM_CNIC cid_count += CNIC_CID_MAX; @@ -11767,7 +11738,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, struct bnx2x *bp; int pcie_width, pcie_speed; int rc, max_non_def_sbs; - int rx_count, tx_count, rss_count, doorbell_size; + int rx_count, tx_count, rss_count; /* * An estimated maximum supported CoS number according to the chip * version. @@ -11810,6 +11781,13 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev); + /* !!! FIXME !!! + * Do not allow the maximum SB count to grow above 16 + * since Special CIDs starts from 16*BNX2X_MULTI_TX_COS=48. + * We will use the FP_SB_MAX_E1x macro for this matter. + */ + max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs); + WARN_ON(!max_non_def_sbs); /* Maximum number of RSS queues: one IGU SB goes to CNIC */ @@ -11820,9 +11798,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, /* * Maximum number of netdev Tx queues: - * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 + * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 */ - tx_count = rss_count * max_cos_est + FCOE_PRESENT; + tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT; /* dev zeroed in init_etherdev */ dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count); @@ -11831,6 +11809,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, bp = netdev_priv(dev); + BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n", + tx_count, rx_count); + bp->igu_sb_cnt = max_non_def_sbs; bp->msg_enable = debug; pci_set_drvdata(pdev, dev); @@ -11843,9 +11824,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs); - BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n", - tx_count, rx_count); - rc = bnx2x_init_bp(bp); if (rc) goto init_one_exit; @@ -11854,15 +11832,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, * Map doorbels here as we need the real value of bp->max_cos which * is initialized in bnx2x_init_bp(). */ - doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT); - if (doorbell_size > pci_resource_len(pdev, 2)) { - dev_err(&bp->pdev->dev, - "Cannot map doorbells, bar size too small, aborting\n"); - rc = -ENOMEM; - goto init_one_exit; - } bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), - doorbell_size); + min_t(u64, BNX2X_DB_SIZE(bp), + pci_resource_len(pdev, 2))); if (!bp->doorbells) { dev_err(&bp->pdev->dev, "Cannot map doorbell space, aborting\n"); @@ -11880,12 +11852,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, #endif - - /* Set bp->num_queues for MSI-X mode*/ - bnx2x_set_num_queues(bp); - /* Configure interrupt mode: try to enable MSI-X/MSI if - * needed. 
+ * needed, set bp->num_queues appropriately. */ bnx2x_set_int_mode(bp); @@ -12229,7 +12197,6 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) { struct eth_spe *spe; - int cxt_index, cxt_offset; #ifdef BNX2X_STOP_ON_ERROR if (unlikely(bp->panic)) @@ -12252,16 +12219,10 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) * ramrod */ if (type == ETH_CONNECTION_TYPE) { - if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) { - cxt_index = BNX2X_ISCSI_ETH_CID(bp) / - ILT_PAGE_CIDS; - cxt_offset = BNX2X_ISCSI_ETH_CID(bp) - - (cxt_index * ILT_PAGE_CIDS); - bnx2x_set_ctx_validation(bp, - &bp->context[cxt_index]. - vcxt[cxt_offset].eth, - BNX2X_ISCSI_ETH_CID(bp)); - } + if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) + bnx2x_set_ctx_validation(bp, &bp->context. + vcxt[BNX2X_ISCSI_ETH_CID].eth, + BNX2X_ISCSI_ETH_CID); } /* @@ -12614,21 +12575,6 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) cp->num_irq = 2; } -void bnx2x_setup_cnic_info(struct bnx2x *bp) -{ - struct cnic_eth_dev *cp = &bp->cnic_eth_dev; - - - cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + - bnx2x_cid_ilt_lines(bp); - cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; - cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); - cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); - - if (NO_ISCSI_OOO(bp)) - cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; -} - static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, void *data) { @@ -12707,10 +12653,10 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) cp->drv_ctl = bnx2x_drv_ctl; cp->drv_register_cnic = bnx2x_register_cnic; cp->drv_unregister_cnic = bnx2x_unregister_cnic; - cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); + cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID; cp->iscsi_l2_client_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); - cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); + cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; if (NO_ISCSI_OOO(bp)) cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index f371e3c06094..bfef98f666c9 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -5913,7 +5913,6 @@ #define MISC_REGISTERS_SPIO_OUTPUT_LOW 0 #define MISC_REGISTERS_SPIO_SET_POS 8 #define HW_LOCK_MAX_RESOURCE_VALUE 31 -#define HW_LOCK_RESOURCE_DCBX_ADMIN_MIB 13 #define HW_LOCK_RESOURCE_DRV_FLAGS 10 #define HW_LOCK_RESOURCE_GPIO 1 #define HW_LOCK_RESOURCE_MDIO 0 @@ -7160,7 +7159,6 @@ Theotherbitsarereservedandshouldbezero*/ #define MDIO_REG_GPHY_EEE_1G (0x1 << 2) #define MDIO_REG_GPHY_EEE_100 (0x1 << 1) #define MDIO_REG_GPHY_EEE_RESOLVED 0x803e -#define MDIO_REG_GPHY_AUX_STATUS 0x19 #define MDIO_REG_INTR_STATUS 0x1a #define MDIO_REG_INTR_MASK 0x1b #define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1) diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 734fd87cd990..6c14b4a4e82c 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -4107,10 +4107,6 @@ static int bnx2x_setup_rss(struct bnx2x *bp, data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; - if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY; - if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags)) data->capabilities |= 
ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; @@ -4119,10 +4115,6 @@ static int bnx2x_setup_rss(struct bnx2x *bp, data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; - if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; - /* Hashing mask */ data->rss_result_mask = p->rss_result_mask; diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 76818ef08f9b..efd80bdd0dfe 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -694,10 +694,8 @@ enum { BNX2X_RSS_IPV4, BNX2X_RSS_IPV4_TCP, - BNX2X_RSS_IPV4_UDP, BNX2X_RSS_IPV6, BNX2X_RSS_IPV6_TCP, - BNX2X_RSS_IPV6_UDP, }; struct bnx2x_config_rss_params { @@ -731,10 +729,6 @@ struct bnx2x_rss_config_obj { /* Last configured indirection table */ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; - /* flags for enabling 4-tupple hash on UDP */ - u8 udp_rss_v4; - u8 udp_rss_v6; - int (*config_rss)(struct bnx2x *bp, struct bnx2x_config_rss_params *p); }; diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 514a528f6ddf..0e8bdcb9c748 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -859,22 +859,17 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) struct tstorm_per_queue_stats *tclient = &bp->fw_stats_data->queue_stats[i]. tstorm_queue_statistics; - struct tstorm_per_queue_stats *old_tclient = - &bnx2x_fp_stats(bp, fp)->old_tclient; + struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient; struct ustorm_per_queue_stats *uclient = &bp->fw_stats_data->queue_stats[i]. ustorm_queue_statistics; - struct ustorm_per_queue_stats *old_uclient = - &bnx2x_fp_stats(bp, fp)->old_uclient; + struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient; struct xstorm_per_queue_stats *xclient = &bp->fw_stats_data->queue_stats[i]. 
xstorm_queue_statistics; - struct xstorm_per_queue_stats *old_xclient = - &bnx2x_fp_stats(bp, fp)->old_xclient; - struct bnx2x_eth_q_stats *qstats = - &bnx2x_fp_stats(bp, fp)->eth_q_stats; - struct bnx2x_eth_q_stats_old *qstats_old = - &bnx2x_fp_stats(bp, fp)->eth_q_stats_old; + struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient; + struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old; u32 diff; @@ -1057,11 +1052,8 @@ static void bnx2x_net_stats_update(struct bnx2x *bp) nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); tmp = estats->mac_discard; - for_each_rx_queue(bp, i) { - struct tstorm_per_queue_stats *old_tclient = - &bp->fp_stats[i].old_tclient; - tmp += le32_to_cpu(old_tclient->checksum_discard); - } + for_each_rx_queue(bp, i) + tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped; nstats->tx_dropped = 0; @@ -1111,9 +1103,9 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp) int i; for_each_queue(bp, i) { - struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats; + struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats; struct bnx2x_eth_q_stats_old *qstats_old = - &bp->fp_stats[i].eth_q_stats_old; + &bp->fp[i].eth_q_stats_old; UPDATE_ESTAT_QSTAT(driver_xoff); UPDATE_ESTAT_QSTAT(rx_err_discard_pkt); @@ -1440,7 +1432,7 @@ static void bnx2x_prep_fw_stats_req(struct bnx2x *bp) query[first_queue_query_index + i]; cur_query_entry->kind = STATS_TYPE_QUEUE; - cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]); + cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]); cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); @@ -1491,19 +1483,15 @@ void bnx2x_stats_init(struct bnx2x *bp) /* function stats */ for_each_queue(bp, i) { - struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i]; - - memset(&fp_stats->old_tclient, 0, - sizeof(fp_stats->old_tclient)); - memset(&fp_stats->old_uclient, 0, - sizeof(fp_stats->old_uclient)); - memset(&fp_stats->old_xclient, 0, - sizeof(fp_stats->old_xclient)); + struct bnx2x_fastpath *fp = &bp->fp[i]; + + memset(&fp->old_tclient, 0, sizeof(fp->old_tclient)); + memset(&fp->old_uclient, 0, sizeof(fp->old_uclient)); + memset(&fp->old_xclient, 0, sizeof(fp->old_xclient)); if (bp->stats_init) { - memset(&fp_stats->eth_q_stats, 0, - sizeof(fp_stats->eth_q_stats)); - memset(&fp_stats->eth_q_stats_old, 0, - sizeof(fp_stats->eth_q_stats_old)); + memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats)); + memset(&fp->eth_q_stats_old, 0, + sizeof(fp->eth_q_stats_old)); } } @@ -1545,10 +1533,8 @@ void bnx2x_save_statistics(struct bnx2x *bp) /* save queue statistics */ for_each_eth_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; - struct bnx2x_eth_q_stats *qstats = - &bnx2x_fp_stats(bp, fp)->eth_q_stats; - struct bnx2x_eth_q_stats_old *qstats_old = - &bnx2x_fp_stats(bp, fp)->eth_q_stats_old; + struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old; UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi); UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo); @@ -1587,7 +1573,7 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats; struct bnx2x_eth_stats *estats = &bp->eth_stats; struct per_queue_stats *fcoe_q_stats = - &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]; + 
&bp->fw_stats_data->queue_stats[FCOE_IDX]; struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = &fcoe_q_stats->tstorm_queue_statistics; @@ -1604,7 +1590,8 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, memset(afex_stats, 0, sizeof(struct afex_stats)); for_each_eth_queue(bp, i) { - struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats; + struct bnx2x_fastpath *fp = &bp->fp[i]; + struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; ADD_64(afex_stats->rx_unicast_bytes_hi, qstats->total_unicast_bytes_received_hi, diff --git a/trunk/drivers/net/ethernet/intel/e1000/e1000_main.c b/trunk/drivers/net/ethernet/intel/e1000/e1000_main.c index 183a4a3224ba..7483ca0a6282 100644 --- a/trunk/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/trunk/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -1078,18 +1078,18 @@ static int __devinit e1000_probe(struct pci_dev *pdev, netdev->priv_flags |= IFF_SUPP_NOFCS; netdev->features |= netdev->hw_features; - netdev->hw_features |= (NETIF_F_RXCSUM | - NETIF_F_RXALL | - NETIF_F_RXFCS); + netdev->hw_features |= NETIF_F_RXCSUM; + netdev->hw_features |= NETIF_F_RXALL; + netdev->hw_features |= NETIF_F_RXFCS; if (pci_using_dac) { netdev->features |= NETIF_F_HIGHDMA; netdev->vlan_features |= NETIF_F_HIGHDMA; } - netdev->vlan_features |= (NETIF_F_TSO | - NETIF_F_HW_CSUM | - NETIF_F_SG); + netdev->vlan_features |= NETIF_F_TSO; + netdev->vlan_features |= NETIF_F_HW_CSUM; + netdev->vlan_features |= NETIF_F_SG; netdev->priv_flags |= IFF_UNICAST_FLT; diff --git a/trunk/drivers/net/ethernet/intel/igb/e1000_regs.h b/trunk/drivers/net/ethernet/intel/igb/e1000_regs.h index 10efcd88dca0..35d1e4f2c92c 100644 --- a/trunk/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/trunk/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -117,7 +117,6 @@ /* TX Rate Limit Registers */ #define E1000_RTTDQSEL 0x3604 /* Tx Desc Plane Queue Select - WO */ -#define E1000_RTTBCNRM 0x3690 /* Tx BCN Rate-scheduler MMW */ #define E1000_RTTBCNRC 0x36B0 /* Tx BCN Rate-Scheduler Config - WO */ /* Split and Replication RX Control - RW */ diff --git a/trunk/drivers/net/ethernet/intel/igb/igb.h b/trunk/drivers/net/ethernet/intel/igb/igb.h index 9e572dd29ab2..ae6d3f393a54 100644 --- a/trunk/drivers/net/ethernet/intel/igb/igb.h +++ b/trunk/drivers/net/ethernet/intel/igb/igb.h @@ -65,30 +65,19 @@ struct igb_adapter; #define MAX_Q_VECTORS 8 /* Transmit and receive queues */ -#define IGB_MAX_RX_QUEUES 8 -#define IGB_MAX_RX_QUEUES_82575 4 +#define IGB_MAX_RX_QUEUES ((adapter->vfs_allocated_count ? 2 : \ + (hw->mac.type > e1000_82575 ? 
8 : 4))) +#define IGB_MAX_RX_QUEUES_I210 4 #define IGB_MAX_RX_QUEUES_I211 2 -#define IGB_MAX_TX_QUEUES 8 +#define IGB_MAX_TX_QUEUES 16 +#define IGB_MAX_TX_QUEUES_I210 4 +#define IGB_MAX_TX_QUEUES_I211 2 #define IGB_MAX_VF_MC_ENTRIES 30 #define IGB_MAX_VF_FUNCTIONS 8 #define IGB_MAX_VFTA_ENTRIES 128 #define IGB_82576_VF_DEV_ID 0x10CA #define IGB_I350_VF_DEV_ID 0x1520 -/* NVM version defines */ -#define IGB_MAJOR_MASK 0xF000 -#define IGB_MINOR_MASK 0x0FF0 -#define IGB_BUILD_MASK 0x000F -#define IGB_COMB_VER_MASK 0x00FF -#define IGB_MAJOR_SHIFT 12 -#define IGB_MINOR_SHIFT 4 -#define IGB_COMB_VER_SHFT 8 -#define IGB_NVM_VER_INVALID 0xFFFF -#define IGB_ETRACK_SHIFT 16 -#define NVM_ETRACK_WORD 0x0042 -#define NVM_COMB_VER_OFF 0x0083 -#define NVM_COMB_VER_PTR 0x003d - struct vf_data_storage { unsigned char vf_mac_addresses[ETH_ALEN]; u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; @@ -382,7 +371,6 @@ struct igb_adapter { spinlock_t tmreg_lock; struct cyclecounter cc; struct timecounter tc; - char fw_version[32]; }; #define IGB_FLAG_HAS_MSI (1 << 0) @@ -432,7 +420,6 @@ extern void igb_update_stats(struct igb_adapter *, struct rtnl_link_stats64 *); extern bool igb_has_link(struct igb_adapter *adapter); extern void igb_set_ethtool_ops(struct net_device *); extern void igb_power_up_link(struct igb_adapter *); -extern void igb_set_fw_version(struct igb_adapter *); #ifdef CONFIG_IGB_PTP extern void igb_ptp_init(struct igb_adapter *adapter); extern void igb_ptp_remove(struct igb_adapter *adapter); diff --git a/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c b/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c index a19c84cad0e9..812d4f963bd1 100644 --- a/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -710,7 +710,6 @@ static int igb_set_eeprom(struct net_device *netdev, if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG))) hw->nvm.ops.update(hw); - igb_set_fw_version(adapter); kfree(eeprom_buff); return ret_val; } @@ -719,16 +718,20 @@ static void igb_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct igb_adapter *adapter = netdev_priv(netdev); + u16 eeprom_data; strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version)); - /* - * EEPROM image version # is reported as firmware version # for - * 82575 controllers - */ - strlcpy(drvinfo->fw_version, adapter->fw_version, - sizeof(drvinfo->fw_version)); + /* EEPROM image version # is reported as firmware version # for + * 82575 controllers */ + adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data); + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), + "%d.%d-%d", + (eeprom_data & 0xF000) >> 12, + (eeprom_data & 0x0FF0) >> 4, + eeprom_data & 0x000F); + strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); drvinfo->n_stats = IGB_STATS_LEN; @@ -2268,38 +2271,6 @@ static void igb_ethtool_complete(struct net_device *netdev) pm_runtime_put(&adapter->pdev->dev); } -#ifdef CONFIG_IGB_PTP -static int igb_ethtool_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) -{ - struct igb_adapter *adapter = netdev_priv(dev); - - info->so_timestamping = - SOF_TIMESTAMPING_TX_HARDWARE | - SOF_TIMESTAMPING_RX_HARDWARE | - SOF_TIMESTAMPING_RAW_HARDWARE; - - if (adapter->ptp_clock) - info->phc_index = ptp_clock_index(adapter->ptp_clock); - else - info->phc_index = -1; - - info->tx_types = - (1 << HWTSTAMP_TX_OFF) | - (1 << HWTSTAMP_TX_ON); - - 
info->rx_filters = - (1 << HWTSTAMP_FILTER_NONE) | - (1 << HWTSTAMP_FILTER_ALL) | - (1 << HWTSTAMP_FILTER_SOME) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | - (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | - (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); - - return 0; -} - -#endif static const struct ethtool_ops igb_ethtool_ops = { .get_settings = igb_get_settings, .set_settings = igb_set_settings, @@ -2328,9 +2299,6 @@ static const struct ethtool_ops igb_ethtool_ops = { .set_coalesce = igb_set_coalesce, .begin = igb_ethtool_begin, .complete = igb_ethtool_complete, -#ifdef CONFIG_IGB_PTP - .get_ts_info = igb_ethtool_get_ts_info, -#endif }; void igb_set_ethtool_ops(struct net_device *netdev) diff --git a/trunk/drivers/net/ethernet/intel/igb/igb_main.c b/trunk/drivers/net/ethernet/intel/igb/igb_main.c index 01ced68d3aac..dd3bfe8cd36c 100644 --- a/trunk/drivers/net/ethernet/intel/igb/igb_main.c +++ b/trunk/drivers/net/ethernet/intel/igb/igb_main.c @@ -59,9 +59,9 @@ #endif #include "igb.h" -#define MAJ 4 -#define MIN 0 -#define BUILD 1 +#define MAJ 3 +#define MIN 4 +#define BUILD 7 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ __stringify(BUILD) "-k" char igb_driver_name[] = "igb"; @@ -1048,6 +1048,11 @@ static int igb_set_interrupt_capability(struct igb_adapter *adapter) if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) numvecs += adapter->num_tx_queues; + /* i210 and i211 can only have 4 MSIX vectors for rx/tx queues. */ + if ((adapter->hw.mac.type == e1000_i210) + || (adapter->hw.mac.type == e1000_i211)) + numvecs = 4; + /* store the number of vectors reserved for queues */ adapter->num_q_vectors = numvecs; @@ -1815,69 +1820,6 @@ static const struct net_device_ops igb_netdev_ops = { .ndo_set_features = igb_set_features, }; -/** - * igb_set_fw_version - Configure version string for ethtool - * @adapter: adapter struct - * - **/ -void igb_set_fw_version(struct igb_adapter *adapter) -{ - struct e1000_hw *hw = &adapter->hw; - u16 eeprom_verh, eeprom_verl, comb_verh, comb_verl, comb_offset; - u16 major, build, patch, fw_version; - u32 etrack_id; - - hw->nvm.ops.read(hw, 5, 1, &fw_version); - if (adapter->hw.mac.type != e1000_i211) { - hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verh); - hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verl); - etrack_id = (eeprom_verh << IGB_ETRACK_SHIFT) | eeprom_verl; - - /* combo image version needs to be found */ - hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset); - if ((comb_offset != 0x0) && - (comb_offset != IGB_NVM_VER_INVALID)) { - hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset - + 1), 1, &comb_verh); - hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset), - 1, &comb_verl); - - /* Only display Option Rom if it exists and is valid */ - if ((comb_verh && comb_verl) && - ((comb_verh != IGB_NVM_VER_INVALID) && - (comb_verl != IGB_NVM_VER_INVALID))) { - major = comb_verl >> IGB_COMB_VER_SHFT; - build = (comb_verl << IGB_COMB_VER_SHFT) | - (comb_verh >> IGB_COMB_VER_SHFT); - patch = comb_verh & IGB_COMB_VER_MASK; - snprintf(adapter->fw_version, - sizeof(adapter->fw_version), - "%d.%d%d, 0x%08x, %d.%d.%d", - (fw_version & IGB_MAJOR_MASK) >> - IGB_MAJOR_SHIFT, - (fw_version & IGB_MINOR_MASK) >> - IGB_MINOR_SHIFT, - (fw_version & IGB_BUILD_MASK), - etrack_id, major, build, patch); - goto out; - } - } - snprintf(adapter->fw_version, sizeof(adapter->fw_version), - "%d.%d%d, 0x%08x", - (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT, - (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT, - (fw_version & IGB_BUILD_MASK), etrack_id); - } else 
{ - snprintf(adapter->fw_version, sizeof(adapter->fw_version), - "%d.%d%d", - (fw_version & IGB_MAJOR_MASK) >> IGB_MAJOR_SHIFT, - (fw_version & IGB_MINOR_MASK) >> IGB_MINOR_SHIFT, - (fw_version & IGB_BUILD_MASK)); - } -out: - return; -} - /** * igb_probe - Device Initialization Routine * @pdev: PCI device information struct @@ -2088,9 +2030,6 @@ static int __devinit igb_probe(struct pci_dev *pdev, goto err_eeprom; } - /* get firmware version for ethtool -i */ - igb_set_fw_version(adapter); - setup_timer(&adapter->watchdog_timer, igb_watchdog, (unsigned long) adapter); setup_timer(&adapter->phy_info_timer, igb_update_phy_info, @@ -2399,7 +2338,6 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) struct e1000_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; - u32 max_rss_queues; pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word); @@ -2432,69 +2370,40 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter) } else adapter->vfs_allocated_count = max_vfs; break; + case e1000_i210: + case e1000_i211: + adapter->vfs_allocated_count = 0; + break; default: break; } #endif /* CONFIG_PCI_IOV */ - - /* Determine the maximum number of RSS queues supported. */ switch (hw->mac.type) { - case e1000_i211: - max_rss_queues = IGB_MAX_RX_QUEUES_I211; - break; - case e1000_82575: case e1000_i210: - max_rss_queues = IGB_MAX_RX_QUEUES_82575; - break; - case e1000_i350: - /* I350 cannot do RSS and SR-IOV at the same time */ - if (!!adapter->vfs_allocated_count) { - max_rss_queues = 1; - break; - } - /* fall through */ - case e1000_82576: - if (!!adapter->vfs_allocated_count) { - max_rss_queues = 2; - break; - } - /* fall through */ - case e1000_82580: - default: - max_rss_queues = IGB_MAX_RX_QUEUES; + adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I210, + num_online_cpus()); break; - } - - adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus()); - - /* Determine if we need to pair queues. */ - switch (hw->mac.type) { - case e1000_82575: case e1000_i211: - /* Device supports enough interrupts without queue pairing. */ + adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES_I211, + num_online_cpus()); break; - case e1000_82576: - /* - * If VFs are going to be allocated with RSS queues then we - * should pair the queues in order to conserve interrupts due - * to limited supply. - */ - if ((adapter->rss_queues > 1) && - (adapter->vfs_allocated_count > 6)) - adapter->flags |= IGB_FLAG_QUEUE_PAIRS; - /* fall through */ - case e1000_82580: - case e1000_i350: - case e1000_i210: default: - /* - * If rss_queues > half of max_rss_queues, pair the queues in - * order to conserve interrupts due to limited supply. 
- */ - if (adapter->rss_queues > (max_rss_queues / 2)) - adapter->flags |= IGB_FLAG_QUEUE_PAIRS; + adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, + num_online_cpus()); break; } + /* i350 cannot do RSS and SR-IOV at the same time */ + if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count) + adapter->rss_queues = 1; + + /* + * if rss_queues > 4 or vfs are going to be allocated with rss_queues + * then we should combine the queues into a queue pair in order to + * conserve interrupts due to limited supply + */ + if ((adapter->rss_queues > 4) || + ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6))) + adapter->flags |= IGB_FLAG_QUEUE_PAIRS; /* Setup and initialize a copy of the hw vlan table array */ adapter->shadow_vfta = kzalloc(sizeof(u32) * @@ -7088,11 +6997,6 @@ static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate, } wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */ - /* - * Set global transmit compensation time to the MMW_SIZE in RTTBCNRM - * register. MMW_SIZE=0x014 if 9728-byte jumbo is supported. - */ - wr32(E1000_RTTBCNRM, 0x14); wr32(E1000_RTTBCNRC, bcnrc_val); } diff --git a/trunk/drivers/net/ethernet/intel/igb/igb_ptp.c b/trunk/drivers/net/ethernet/intel/igb/igb_ptp.c index c846ea9131a3..d5ee7fa50723 100644 --- a/trunk/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/trunk/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -330,17 +330,7 @@ void igb_ptp_init(struct igb_adapter *adapter) void igb_ptp_remove(struct igb_adapter *adapter) { - switch (adapter->hw.mac.type) { - case e1000_i211: - case e1000_i210: - case e1000_i350: - case e1000_82580: - case e1000_82576: - cancel_delayed_work_sync(&adapter->overflow_work); - break; - default: - return; - } + cancel_delayed_work_sync(&adapter->overflow_work); if (adapter->ptp_clock) { ptp_clock_unregister(adapter->ptp_clock); diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index e7dddfd97cb9..dee64d2703f0 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -241,9 +241,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, /* Determine 1G link capabilities off of SFP+ type */ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) { + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) { *speed = IXGBE_LINK_SPEED_1GB_FULL; *negotiation = true; goto out; diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index bbc7da5cdb4d..3178f1ec3711 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -154,60 +154,100 @@ static int ixgbe_get_settings(struct net_device *netdev, { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; - ixgbe_link_speed supported_link; u32 link_speed = 0; - bool autoneg; bool link_up; - hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); - - /* set the supported link speeds */ - if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) - ecmd->supported |= SUPPORTED_10000baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) - ecmd->supported |= SUPPORTED_1000baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_100_FULL) - ecmd->supported |= SUPPORTED_100baseT_Full; - - /* set 
the advertised speeds */ - if (hw->phy.autoneg_advertised) { - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) - ecmd->advertising |= ADVERTISED_100baseT_Full; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) - ecmd->advertising |= ADVERTISED_10000baseT_Full; - if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) - ecmd->advertising |= ADVERTISED_1000baseT_Full; - } else { - /* default modes in case phy.autoneg_advertised isn't set */ - if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) - ecmd->advertising |= ADVERTISED_10000baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) - ecmd->advertising |= ADVERTISED_1000baseT_Full; - if (supported_link & IXGBE_LINK_SPEED_100_FULL) - ecmd->advertising |= ADVERTISED_100baseT_Full; - } + ecmd->supported = SUPPORTED_10000baseT_Full; + ecmd->autoneg = AUTONEG_ENABLE; + ecmd->transceiver = XCVR_EXTERNAL; + if ((hw->phy.media_type == ixgbe_media_type_copper) || + (hw->phy.multispeed_fiber)) { + ecmd->supported |= (SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg); - if (autoneg) { - ecmd->supported |= SUPPORTED_Autoneg; - ecmd->advertising |= ADVERTISED_Autoneg; - ecmd->autoneg = AUTONEG_ENABLE; - } else - ecmd->autoneg = AUTONEG_DISABLE; + switch (hw->mac.type) { + case ixgbe_mac_X540: + ecmd->supported |= SUPPORTED_100baseT_Full; + break; + default: + break; + } - ecmd->transceiver = XCVR_EXTERNAL; + ecmd->advertising = ADVERTISED_Autoneg; + if (hw->phy.autoneg_advertised) { + if (hw->phy.autoneg_advertised & + IXGBE_LINK_SPEED_100_FULL) + ecmd->advertising |= ADVERTISED_100baseT_Full; + if (hw->phy.autoneg_advertised & + IXGBE_LINK_SPEED_10GB_FULL) + ecmd->advertising |= ADVERTISED_10000baseT_Full; + if (hw->phy.autoneg_advertised & + IXGBE_LINK_SPEED_1GB_FULL) + ecmd->advertising |= ADVERTISED_1000baseT_Full; + } else { + /* + * Default advertised modes in case + * phy.autoneg_advertised isn't set. 
+ */ + ecmd->advertising |= (ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full); + if (hw->mac.type == ixgbe_mac_X540) + ecmd->advertising |= ADVERTISED_100baseT_Full; + } + + if (hw->phy.media_type == ixgbe_media_type_copper) { + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + } else { + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + } + } else if (hw->phy.media_type == ixgbe_media_type_backplane) { + /* Set as FIBRE until SERDES defined in kernel */ + if (hw->device_id == IXGBE_DEV_ID_82598_BX) { + ecmd->supported = (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE); + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE); + ecmd->port = PORT_FIBRE; + ecmd->autoneg = AUTONEG_DISABLE; + } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) || + (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) { + ecmd->supported |= (SUPPORTED_1000baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_FIBRE); + ecmd->advertising = (ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_Autoneg | + ADVERTISED_FIBRE); + ecmd->port = PORT_FIBRE; + } else { + ecmd->supported |= (SUPPORTED_1000baseT_Full | + SUPPORTED_FIBRE); + ecmd->advertising = (ADVERTISED_10000baseT_Full | + ADVERTISED_1000baseT_Full | + ADVERTISED_FIBRE); + ecmd->port = PORT_FIBRE; + } + } else { + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising = (ADVERTISED_10000baseT_Full | + ADVERTISED_FIBRE); + ecmd->port = PORT_FIBRE; + ecmd->autoneg = AUTONEG_DISABLE; + } - /* Determine the remaining settings based on the PHY type. */ + /* Get PHY type */ switch (adapter->hw.phy.type) { case ixgbe_phy_tn: case ixgbe_phy_aq: case ixgbe_phy_cu_unknown: - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; + /* Copper 10G-BASET */ ecmd->port = PORT_TP; break; case ixgbe_phy_qt: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; ecmd->port = PORT_FIBRE; break; case ixgbe_phy_nl: @@ -217,59 +257,42 @@ static int ixgbe_get_settings(struct net_device *netdev, case ixgbe_phy_sfp_avago: case ixgbe_phy_sfp_intel: case ixgbe_phy_sfp_unknown: - /* SFP+ devices, further checking needed */ switch (adapter->hw.phy.sfp_type) { + /* SFP+ devices, further checking needed */ case ixgbe_sfp_type_da_cu: case ixgbe_sfp_type_da_cu_core0: case ixgbe_sfp_type_da_cu_core1: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; ecmd->port = PORT_DA; break; case ixgbe_sfp_type_sr: case ixgbe_sfp_type_lr: case ixgbe_sfp_type_srlr_core0: case ixgbe_sfp_type_srlr_core1: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; ecmd->port = PORT_FIBRE; break; case ixgbe_sfp_type_not_present: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; ecmd->port = PORT_NONE; break; case ixgbe_sfp_type_1g_cu_core0: case ixgbe_sfp_type_1g_cu_core1: - ecmd->supported |= SUPPORTED_TP; - ecmd->advertising |= ADVERTISED_TP; ecmd->port = PORT_TP; - break; - case ixgbe_sfp_type_1g_sx_core0: - case ixgbe_sfp_type_1g_sx_core1: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; - ecmd->port = PORT_FIBRE; + ecmd->supported = SUPPORTED_TP; + ecmd->advertising = (ADVERTISED_1000baseT_Full | + ADVERTISED_TP); break; case ixgbe_sfp_type_unknown: default: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; ecmd->port = PORT_OTHER; break; } break; case ixgbe_phy_xaui: - ecmd->supported 
|= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; ecmd->port = PORT_NONE; break; case ixgbe_phy_unknown: case ixgbe_phy_generic: case ixgbe_phy_sfp_unsupported: default: - ecmd->supported |= SUPPORTED_FIBRE; - ecmd->advertising |= ADVERTISED_FIBRE; ecmd->port = PORT_OTHER; break; } diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 69a660b5621a..b0ddfd47e473 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6380,12 +6380,17 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_ring *tx_ring; + if (skb->len <= 0) { + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; + } + /* * The minimum packet size for olinfo paylen is 17 so pad the skb * in order to meet this minimum size requirement. */ - if (unlikely(skb->len < 17)) { - if (skb_pad(skb, 17 - skb->len)) + if (skb->len < 17) { + if (skb_padto(skb, 17)) return NETDEV_TX_OK; skb->len = 17; } diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 71659edf81aa..24117709d6a2 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -907,8 +907,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) * 8 SFP_act_lmt_DA_CORE1 - 82599-specific * 9 SFP_1g_cu_CORE0 - 82599-specific * 10 SFP_1g_cu_CORE1 - 82599-specific - * 11 SFP_1g_sx_CORE0 - 82599-specific - * 12 SFP_1g_sx_CORE1 - 82599-specific */ if (hw->mac.type == ixgbe_mac_82598EB) { if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) @@ -959,13 +957,6 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) else hw->phy.sfp_type = ixgbe_sfp_type_1g_cu_core1; - } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) { - if (hw->bus.lan_id == 0) - hw->phy.sfp_type = - ixgbe_sfp_type_1g_sx_core0; - else - hw->phy.sfp_type = - ixgbe_sfp_type_1g_sx_core1; } else { hw->phy.sfp_type = ixgbe_sfp_type_unknown; } @@ -1058,9 +1049,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) /* Verify supported 1G SFP modules */ if (comp_codes_10g == 0 && !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || - hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0)) { hw->phy.type = ixgbe_phy_sfp_unsupported; status = IXGBE_ERR_SFP_NOT_SUPPORTED; goto out; @@ -1075,9 +1064,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) hw->mac.ops.get_device_caps(hw, &enforce_sfp); if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && !((hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0) || - (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) || - (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0) || - (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1))) { + (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1))) { /* Make sure we're a supported PHY type */ if (hw->phy.type == ixgbe_phy_sfp_intel) { status = 0; @@ -1141,12 +1128,10 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, * SR modules */ if (sfp_type == ixgbe_sfp_type_da_act_lmt_core0 || - sfp_type == ixgbe_sfp_type_1g_cu_core0 || - sfp_type == ixgbe_sfp_type_1g_sx_core0) + sfp_type == ixgbe_sfp_type_1g_cu_core0) sfp_type = ixgbe_sfp_type_srlr_core0; else if (sfp_type == ixgbe_sfp_type_da_act_lmt_core1 || - sfp_type == 
ixgbe_sfp_type_1g_cu_core1 || - sfp_type == ixgbe_sfp_type_1g_sx_core1) + sfp_type == ixgbe_sfp_type_1g_cu_core1) sfp_type = ixgbe_sfp_type_srlr_core1; /* Read offset to PHY init contents */ diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index 7416d22ec227..1085c0739a3c 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -2604,8 +2604,6 @@ enum ixgbe_sfp_type { ixgbe_sfp_type_da_act_lmt_core1 = 8, ixgbe_sfp_type_1g_cu_core0 = 9, ixgbe_sfp_type_1g_cu_core1 = 10, - ixgbe_sfp_type_1g_sx_core0 = 11, - ixgbe_sfp_type_1g_sx_core1 = 12, ixgbe_sfp_type_not_present = 0xFFFE, ixgbe_sfp_type_unknown = 0xFFFF }; diff --git a/trunk/drivers/net/ethernet/smsc/smsc911x.c b/trunk/drivers/net/ethernet/smsc/smsc911x.c index 54ca99dbb406..1466e5d2af44 100644 --- a/trunk/drivers/net/ethernet/smsc/smsc911x.c +++ b/trunk/drivers/net/ethernet/smsc/smsc911x.c @@ -1442,14 +1442,6 @@ smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, u8 dev_addr[6]) smsc911x_mac_write(pdata, ADDRL, mac_low32); } -static void smsc911x_disable_irq_chip(struct net_device *dev) -{ - struct smsc911x_data *pdata = netdev_priv(dev); - - smsc911x_reg_write(pdata, INT_EN, 0); - smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); -} - static int smsc911x_open(struct net_device *dev) { struct smsc911x_data *pdata = netdev_priv(dev); @@ -1502,7 +1494,8 @@ static int smsc911x_open(struct net_device *dev) spin_unlock_irq(&pdata->mac_lock); /* Initialise irqs, but leave all sources disabled */ - smsc911x_disable_irq_chip(dev); + smsc911x_reg_write(pdata, INT_EN, 0); + smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); /* Set interrupt deassertion to 100uS */ intcfg = ((10 << 24) | INT_CFG_IRQ_EN_); @@ -2222,6 +2215,9 @@ static int __devinit smsc911x_init(struct net_device *dev) if (smsc911x_soft_reset(pdata)) return -ENODEV; + /* Disable all interrupt sources until we bring the device up */ + smsc911x_reg_write(pdata, INT_EN, 0); + ether_setup(dev); dev->flags |= IFF_MULTICAST; netif_napi_add(dev, &pdata->napi, smsc911x_poll, SMSC_NAPI_WEIGHT); @@ -2438,7 +2434,8 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev) smsc911x_reg_write(pdata, INT_CFG, intcfg); /* Ensure interrupts are globally disabled before connecting ISR */ - smsc911x_disable_irq_chip(dev); + smsc911x_reg_write(pdata, INT_EN, 0); + smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF); retval = request_irq(dev->irq, smsc911x_irqhandler, irq_flags | IRQF_SHARED, dev->name, dev); diff --git a/trunk/drivers/net/team/team.c b/trunk/drivers/net/team/team.c index 3a4a74be52d9..c61ae35a53ce 100644 --- a/trunk/drivers/net/team/team.c +++ b/trunk/drivers/net/team/team.c @@ -1,5 +1,5 @@ /* - * drivers/net/team/team.c - Network team device driver + * net/drivers/team/team.c - Network team device driver * Copyright (c) 2011 Jiri Pirko * * This program is free software; you can redistribute it and/or modify @@ -82,16 +82,14 @@ static void team_refresh_port_linkup(struct team_port *port) port->state.linkup; } - /******************* * Options handling *******************/ struct team_option_inst { /* One for each option instance */ struct list_head list; - struct list_head tmp_list; struct team_option *option; - struct team_option_inst_info info; + struct team_port *port; /* != NULL if per-port */ bool changed; bool removed; }; @@ -108,6 +106,22 @@ static struct team_option *__team_find_option(struct team *team, return NULL; } +static int 
__team_option_inst_add(struct team *team, struct team_option *option, + struct team_port *port) +{ + struct team_option_inst *opt_inst; + + opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL); + if (!opt_inst) + return -ENOMEM; + opt_inst->option = option; + opt_inst->port = port; + opt_inst->changed = true; + opt_inst->removed = false; + list_add_tail(&opt_inst->list, &team->option_inst_list); + return 0; +} + static void __team_option_inst_del(struct team_option_inst *opt_inst) { list_del(&opt_inst->list); @@ -125,49 +139,14 @@ static void __team_option_inst_del_option(struct team *team, } } -static int __team_option_inst_add(struct team *team, struct team_option *option, - struct team_port *port) -{ - struct team_option_inst *opt_inst; - unsigned int array_size; - unsigned int i; - int err; - - array_size = option->array_size; - if (!array_size) - array_size = 1; /* No array but still need one instance */ - - for (i = 0; i < array_size; i++) { - opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL); - if (!opt_inst) - return -ENOMEM; - opt_inst->option = option; - opt_inst->info.port = port; - opt_inst->info.array_index = i; - opt_inst->changed = true; - opt_inst->removed = false; - list_add_tail(&opt_inst->list, &team->option_inst_list); - if (option->init) { - err = option->init(team, &opt_inst->info); - if (err) - return err; - } - - } - return 0; -} - static int __team_option_inst_add_option(struct team *team, struct team_option *option) { struct team_port *port; int err; - if (!option->per_port) { - err = __team_option_inst_add(team, option, NULL); - if (err) - goto inst_del_option; - } + if (!option->per_port) + return __team_option_inst_add(team, option, 0); list_for_each_entry(port, &team->port_list, list) { err = __team_option_inst_add(team, option, port); @@ -201,7 +180,7 @@ static void __team_option_inst_del_port(struct team *team, list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) { if (opt_inst->option->per_port && - opt_inst->info.port == port) + opt_inst->port == port) __team_option_inst_del(opt_inst); } } @@ -232,7 +211,7 @@ static void __team_option_inst_mark_removed_port(struct team *team, struct team_option_inst *opt_inst; list_for_each_entry(opt_inst, &team->option_inst_list, list) { - if (opt_inst->info.port == port) { + if (opt_inst->port == port) { opt_inst->changed = true; opt_inst->removed = true; } @@ -345,12 +324,28 @@ void team_options_unregister(struct team *team, } EXPORT_SYMBOL(team_options_unregister); +static int team_option_port_add(struct team *team, struct team_port *port) +{ + int err; + + err = __team_option_inst_add_port(team, port); + if (err) + return err; + __team_options_change_check(team); + return 0; +} + +static void team_option_port_del(struct team *team, struct team_port *port) +{ + __team_option_inst_mark_removed_port(team, port); + __team_options_change_check(team); + __team_option_inst_del_port(team, port); +} + static int team_option_get(struct team *team, struct team_option_inst *opt_inst, struct team_gsetter_ctx *ctx) { - if (!opt_inst->option->getter) - return -EOPNOTSUPP; return opt_inst->option->getter(team, ctx); } @@ -358,26 +353,16 @@ static int team_option_set(struct team *team, struct team_option_inst *opt_inst, struct team_gsetter_ctx *ctx) { - if (!opt_inst->option->setter) - return -EOPNOTSUPP; - return opt_inst->option->setter(team, ctx); -} + int err; -void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info) -{ - struct team_option_inst *opt_inst; + err = opt_inst->option->setter(team, 
ctx); + if (err) + return err; - opt_inst = container_of(opt_inst_info, struct team_option_inst, info); opt_inst->changed = true; -} -EXPORT_SYMBOL(team_option_inst_set_change); - -void team_options_change_check(struct team *team) -{ __team_options_change_check(team); + return err; } -EXPORT_SYMBOL(team_options_change_check); - /**************** * Mode handling @@ -386,18 +371,13 @@ EXPORT_SYMBOL(team_options_change_check); static LIST_HEAD(mode_list); static DEFINE_SPINLOCK(mode_list_lock); -struct team_mode_item { - struct list_head list; - const struct team_mode *mode; -}; - -static struct team_mode_item *__find_mode(const char *kind) +static struct team_mode *__find_mode(const char *kind) { - struct team_mode_item *mitem; + struct team_mode *mode; - list_for_each_entry(mitem, &mode_list, list) { - if (strcmp(mitem->mode->kind, kind) == 0) - return mitem; + list_for_each_entry(mode, &mode_list, list) { + if (strcmp(mode->kind, kind) == 0) + return mode; } return NULL; } @@ -412,65 +392,49 @@ static bool is_good_mode_name(const char *name) return true; } -int team_mode_register(const struct team_mode *mode) +int team_mode_register(struct team_mode *mode) { int err = 0; - struct team_mode_item *mitem; if (!is_good_mode_name(mode->kind) || mode->priv_size > TEAM_MODE_PRIV_SIZE) return -EINVAL; - - mitem = kmalloc(sizeof(*mitem), GFP_KERNEL); - if (!mitem) - return -ENOMEM; - spin_lock(&mode_list_lock); if (__find_mode(mode->kind)) { err = -EEXIST; - kfree(mitem); goto unlock; } - mitem->mode = mode; - list_add_tail(&mitem->list, &mode_list); + list_add_tail(&mode->list, &mode_list); unlock: spin_unlock(&mode_list_lock); return err; } EXPORT_SYMBOL(team_mode_register); -void team_mode_unregister(const struct team_mode *mode) +int team_mode_unregister(struct team_mode *mode) { - struct team_mode_item *mitem; - spin_lock(&mode_list_lock); - mitem = __find_mode(mode->kind); - if (mitem) { - list_del_init(&mitem->list); - kfree(mitem); - } + list_del_init(&mode->list); spin_unlock(&mode_list_lock); + return 0; } EXPORT_SYMBOL(team_mode_unregister); -static const struct team_mode *team_mode_get(const char *kind) +static struct team_mode *team_mode_get(const char *kind) { - struct team_mode_item *mitem; - const struct team_mode *mode = NULL; + struct team_mode *mode; spin_lock(&mode_list_lock); - mitem = __find_mode(kind); - if (!mitem) { + mode = __find_mode(kind); + if (!mode) { spin_unlock(&mode_list_lock); request_module("team-mode-%s", kind); spin_lock(&mode_list_lock); - mitem = __find_mode(kind); + mode = __find_mode(kind); } - if (mitem) { - mode = mitem->mode; + if (mode) if (!try_module_get(mode->owner)) mode = NULL; - } spin_unlock(&mode_list_lock); return mode; @@ -494,20 +458,6 @@ rx_handler_result_t team_dummy_receive(struct team *team, return RX_HANDLER_ANOTHER; } -static const struct team_mode __team_no_mode = { - .kind = "*NOMODE*", -}; - -static bool team_is_mode_set(struct team *team) -{ - return team->mode != &__team_no_mode; -} - -static void team_set_no_mode(struct team *team) -{ - team->mode = &__team_no_mode; -} - static void team_adjust_ops(struct team *team) { /* @@ -516,13 +466,13 @@ static void team_adjust_ops(struct team *team) */ if (list_empty(&team->port_list) || - !team_is_mode_set(team) || !team->mode->ops->transmit) + !team->mode || !team->mode->ops->transmit) team->ops.transmit = team_dummy_transmit; else team->ops.transmit = team->mode->ops->transmit; if (list_empty(&team->port_list) || - !team_is_mode_set(team) || !team->mode->ops->receive) + !team->mode || 
!team->mode->ops->receive) team->ops.receive = team_dummy_receive; else team->ops.receive = team->mode->ops->receive; @@ -537,7 +487,7 @@ static int __team_change_mode(struct team *team, const struct team_mode *new_mode) { /* Check if mode was previously set and do cleanup if so */ - if (team_is_mode_set(team)) { + if (team->mode) { void (*exit_op)(struct team *team) = team->ops.exit; /* Clear ops area so no callback is called any longer */ @@ -547,7 +497,7 @@ static int __team_change_mode(struct team *team, if (exit_op) exit_op(team); team_mode_put(team->mode); - team_set_no_mode(team); + team->mode = NULL; /* zero private data area */ memset(&team->mode_priv, 0, sizeof(struct team) - offsetof(struct team, mode_priv)); @@ -573,7 +523,7 @@ static int __team_change_mode(struct team *team, static int team_change_mode(struct team *team, const char *kind) { - const struct team_mode *new_mode; + struct team_mode *new_mode; struct net_device *dev = team->dev; int err; @@ -582,7 +532,7 @@ static int team_change_mode(struct team *team, const char *kind) return -EBUSY; } - if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) { + if (team->mode && strcmp(team->mode->kind, kind) == 0) { netdev_err(dev, "Unable to change to the same mode the team is in\n"); return -EINVAL; } @@ -687,8 +637,6 @@ static void team_port_enable(struct team *team, port->index = team->en_port_count++; hlist_add_head_rcu(&port->hlist, team_port_index_hash(team, port->index)); - if (team->ops.port_enabled) - team->ops.port_enabled(team, port); } static void __reconstruct_port_hlist(struct team *team, int rm_index) @@ -712,8 +660,6 @@ static void team_port_disable(struct team *team, if (!team_port_enabled(port)) return; - if (team->ops.port_disabled) - team->ops.port_disabled(team, port); hlist_del_rcu(&port->hlist); __reconstruct_port_hlist(team, rm_index); team->en_port_count--; @@ -812,8 +758,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) return -EBUSY; } - port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size, - GFP_KERNEL); + port = kzalloc(sizeof(struct team_port), GFP_KERNEL); if (!port) return -ENOMEM; @@ -864,7 +809,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) goto err_handler_register; } - err = __team_option_inst_add_port(team, port); + err = team_option_port_add(team, port); if (err) { netdev_err(dev, "Device %s failed to add per-port options\n", portname); @@ -877,7 +822,6 @@ static int team_port_add(struct team *team, struct net_device *port_dev) team_adjust_ops(team); __team_compute_features(team); __team_port_change_check(port, !!netif_carrier_ok(port_dev)); - __team_options_change_check(team); netdev_info(dev, "Port device %s added\n", portname); @@ -921,14 +865,12 @@ static int team_port_del(struct team *team, struct net_device *port_dev) return -ENOENT; } - __team_option_inst_mark_removed_port(team, port); - __team_options_change_check(team); - __team_option_inst_del_port(team, port); port->removed = true; __team_port_change_check(port, false); team_port_disable(team, port); list_del_rcu(&port->list); team_adjust_ops(team); + team_option_port_del(team, port); netdev_rx_handler_unregister(port_dev); netdev_set_master(port_dev, NULL); vlan_vids_del_by_dev(port_dev, dev); @@ -949,9 +891,11 @@ static int team_port_del(struct team *team, struct net_device *port_dev) * Net device ops *****************/ +static const char team_no_mode_kind[] = "*NOMODE*"; + static int team_mode_option_get(struct team *team, struct 
team_gsetter_ctx *ctx) { - ctx->data.str_val = team->mode->kind; + ctx->data.str_val = team->mode ? team->mode->kind : team_no_mode_kind; return 0; } @@ -963,47 +907,39 @@ static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx) static int team_port_en_option_get(struct team *team, struct team_gsetter_ctx *ctx) { - struct team_port *port = ctx->info->port; - - ctx->data.bool_val = team_port_enabled(port); + ctx->data.bool_val = team_port_enabled(ctx->port); return 0; } static int team_port_en_option_set(struct team *team, struct team_gsetter_ctx *ctx) { - struct team_port *port = ctx->info->port; - if (ctx->data.bool_val) - team_port_enable(team, port); + team_port_enable(team, ctx->port); else - team_port_disable(team, port); + team_port_disable(team, ctx->port); return 0; } static int team_user_linkup_option_get(struct team *team, struct team_gsetter_ctx *ctx) { - struct team_port *port = ctx->info->port; - - ctx->data.bool_val = port->user.linkup; + ctx->data.bool_val = ctx->port->user.linkup; return 0; } static int team_user_linkup_option_set(struct team *team, struct team_gsetter_ctx *ctx) { - struct team_port *port = ctx->info->port; - - port->user.linkup = ctx->data.bool_val; - team_refresh_port_linkup(port); + ctx->port->user.linkup = ctx->data.bool_val; + team_refresh_port_linkup(ctx->port); return 0; } static int team_user_linkup_en_option_get(struct team *team, struct team_gsetter_ctx *ctx) { - struct team_port *port = ctx->info->port; + struct team_port *port = ctx->port; ctx->data.bool_val = port->user.linkup_enabled; return 0; @@ -1012,10 +948,10 @@ static int team_user_linkup_en_option_get(struct team *team, static int team_user_linkup_en_option_set(struct team *team, struct team_gsetter_ctx *ctx) { - struct team_port *port = ctx->info->port; + struct team_port *port = ctx->port; port->user.linkup_enabled = ctx->data.bool_val; - team_refresh_port_linkup(port); + team_refresh_port_linkup(ctx->port); return 0; } @@ -1057,7 +993,6 @@ static int team_init(struct net_device *dev) team->dev = dev; mutex_init(&team->lock); - team_set_no_mode(team); team->pcpu_stats = alloc_percpu(struct team_pcpu_stats); if (!team->pcpu_stats) @@ -1547,128 +1482,16 @@ static int team_nl_send_generic(struct genl_info *info, struct team *team, return err; } -typedef int team_nl_send_func_t(struct sk_buff *skb, - struct team *team, u32 pid); - -static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 pid) -{ - return genlmsg_unicast(dev_net(team->dev), skb, pid); -} - -static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team, - struct team_option_inst *opt_inst) -{ - struct nlattr *option_item; - struct team_option *option = opt_inst->option; - struct team_option_inst_info *opt_inst_info = &opt_inst->info; - struct team_gsetter_ctx ctx; - int err; - - ctx.info = opt_inst_info; - err = team_option_get(team, opt_inst, &ctx); - if (err) - return err; - - option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION); - if (!option_item) - return -EMSGSIZE; - - if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name)) - goto nest_cancel; - if (opt_inst_info->port && - nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX, - opt_inst_info->port->dev->ifindex)) - goto nest_cancel; - if (opt_inst->option->array_size && - nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX, - opt_inst_info->array_index)) - goto nest_cancel; - - switch (option->type) { - case TEAM_OPTION_TYPE_U32: - if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32)) - goto nest_cancel; - if 
(nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val)) - goto nest_cancel; - break; - case TEAM_OPTION_TYPE_STRING: - if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING)) - goto nest_cancel; - if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA, - ctx.data.str_val)) - goto nest_cancel; - break; - case TEAM_OPTION_TYPE_BINARY: - if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY)) - goto nest_cancel; - if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len, - ctx.data.bin_val.ptr)) - goto nest_cancel; - break; - case TEAM_OPTION_TYPE_BOOL: - if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG)) - goto nest_cancel; - if (ctx.data.bool_val && - nla_put_flag(skb, TEAM_ATTR_OPTION_DATA)) - goto nest_cancel; - break; - default: - BUG(); - } - if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED)) - goto nest_cancel; - if (opt_inst->changed) { - if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED)) - goto nest_cancel; - opt_inst->changed = false; - } - nla_nest_end(skb, option_item); - return 0; - -nest_cancel: - nla_nest_cancel(skb, option_item); - return -EMSGSIZE; -} - -static int __send_and_alloc_skb(struct sk_buff **pskb, - struct team *team, u32 pid, - team_nl_send_func_t *send_func) -{ - int err; - - if (*pskb) { - err = send_func(*pskb, team, pid); - if (err) - return err; - } - *pskb = genlmsg_new(NLMSG_DEFAULT_SIZE - GENL_HDRLEN, GFP_KERNEL); - if (!*pskb) - return -ENOMEM; - return 0; -} - -static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq, - int flags, team_nl_send_func_t *send_func, - struct list_head *sel_opt_inst_list) +static int team_nl_fill_options_get(struct sk_buff *skb, + u32 pid, u32 seq, int flags, + struct team *team, bool fillall) { struct nlattr *option_list; - struct nlmsghdr *nlh; void *hdr; struct team_option_inst *opt_inst; int err; - struct sk_buff *skb = NULL; - bool incomplete; - int i; - - opt_inst = list_first_entry(sel_opt_inst_list, - struct team_option_inst, tmp_list); - -start_again: - err = __send_and_alloc_skb(&skb, team, pid, send_func); - if (err) - return err; - hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI, + hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, TEAM_CMD_OPTIONS_GET); if (IS_ERR(hdr)) return PTR_ERR(hdr); @@ -1677,80 +1500,122 @@ static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq, goto nla_put_failure; option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION); if (!option_list) - goto nla_put_failure; + return -EMSGSIZE; - i = 0; - incomplete = false; - list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) { - err = team_nl_fill_one_option_get(skb, team, opt_inst); - if (err) { - if (err == -EMSGSIZE) { - if (!i) - goto errout; - incomplete = true; - break; - } - goto errout; + list_for_each_entry(opt_inst, &team->option_inst_list, list) { + struct nlattr *option_item; + struct team_option *option = opt_inst->option; + struct team_gsetter_ctx ctx; + + /* Include only changed options if fill all mode is not on */ + if (!fillall && !opt_inst->changed) + continue; + option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION); + if (!option_item) + goto nla_put_failure; + if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name)) + goto nla_put_failure; + if (opt_inst->changed) { + if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED)) + goto nla_put_failure; + opt_inst->changed = false; + } + if (opt_inst->removed && + nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED)) + goto nla_put_failure; + if (opt_inst->port && + nla_put_u32(skb, 
TEAM_ATTR_OPTION_PORT_IFINDEX, + opt_inst->port->dev->ifindex)) + goto nla_put_failure; + ctx.port = opt_inst->port; + switch (option->type) { + case TEAM_OPTION_TYPE_U32: + if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32)) + goto nla_put_failure; + err = team_option_get(team, opt_inst, &ctx); + if (err) + goto errout; + if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, + ctx.data.u32_val)) + goto nla_put_failure; + break; + case TEAM_OPTION_TYPE_STRING: + if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING)) + goto nla_put_failure; + err = team_option_get(team, opt_inst, &ctx); + if (err) + goto errout; + if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA, + ctx.data.str_val)) + goto nla_put_failure; + break; + case TEAM_OPTION_TYPE_BINARY: + if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY)) + goto nla_put_failure; + err = team_option_get(team, opt_inst, &ctx); + if (err) + goto errout; + if (nla_put(skb, TEAM_ATTR_OPTION_DATA, + ctx.data.bin_val.len, ctx.data.bin_val.ptr)) + goto nla_put_failure; + break; + case TEAM_OPTION_TYPE_BOOL: + if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG)) + goto nla_put_failure; + err = team_option_get(team, opt_inst, &ctx); + if (err) + goto errout; + if (ctx.data.bool_val && + nla_put_flag(skb, TEAM_ATTR_OPTION_DATA)) + goto nla_put_failure; + break; + default: + BUG(); } - i++; + nla_nest_end(skb, option_item); } nla_nest_end(skb, option_list); - genlmsg_end(skb, hdr); - if (incomplete) - goto start_again; - -send_done: - nlh = nlmsg_put(skb, pid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI); - if (!nlh) { - err = __send_and_alloc_skb(&skb, team, pid, send_func); - if (err) - goto errout; - goto send_done; - } - - return send_func(skb, team, pid); + return genlmsg_end(skb, hdr); nla_put_failure: err = -EMSGSIZE; errout: genlmsg_cancel(skb, hdr); - nlmsg_free(skb); return err; } +static int team_nl_fill_options_get_all(struct sk_buff *skb, + struct genl_info *info, int flags, + struct team *team) +{ + return team_nl_fill_options_get(skb, info->snd_pid, + info->snd_seq, NLM_F_ACK, + team, true); +} + static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info) { struct team *team; - struct team_option_inst *opt_inst; int err; - LIST_HEAD(sel_opt_inst_list); team = team_nl_team_get(info); if (!team) return -EINVAL; - list_for_each_entry(opt_inst, &team->option_inst_list, list) - list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list); - err = team_nl_send_options_get(team, info->snd_pid, info->snd_seq, - NLM_F_ACK, team_nl_send_unicast, - &sel_opt_inst_list); + err = team_nl_send_generic(info, team, team_nl_fill_options_get_all); team_nl_team_put(team); return err; } -static int team_nl_send_event_options_get(struct team *team, - struct list_head *sel_opt_inst_list); - static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) { struct team *team; int err = 0; int i; struct nlattr *nl_option; - LIST_HEAD(opt_inst_list); team = team_nl_team_get(info); if (!team) @@ -1764,12 +1629,10 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) { struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1]; - struct nlattr *attr; + struct nlattr *attr_port_ifindex; struct nlattr *attr_data; enum team_option_type opt_type; int opt_port_ifindex = 0; /* != 0 for per-port options */ - u32 opt_array_index = 0; - bool opt_is_array = false; struct team_option_inst *opt_inst; char *opt_name; bool opt_found = false; @@ -1811,33 +1674,23 @@ static int 
team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) } opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]); - attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX]; - if (attr) - opt_port_ifindex = nla_get_u32(attr); - - attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX]; - if (attr) { - opt_is_array = true; - opt_array_index = nla_get_u32(attr); - } + attr_port_ifindex = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX]; + if (attr_port_ifindex) + opt_port_ifindex = nla_get_u32(attr_port_ifindex); list_for_each_entry(opt_inst, &team->option_inst_list, list) { struct team_option *option = opt_inst->option; struct team_gsetter_ctx ctx; - struct team_option_inst_info *opt_inst_info; int tmp_ifindex; - opt_inst_info = &opt_inst->info; - tmp_ifindex = opt_inst_info->port ? - opt_inst_info->port->dev->ifindex : 0; + tmp_ifindex = opt_inst->port ? + opt_inst->port->dev->ifindex : 0; if (option->type != opt_type || strcmp(option->name, opt_name) || - tmp_ifindex != opt_port_ifindex || - (option->array_size && !opt_is_array) || - opt_inst_info->array_index != opt_array_index) + tmp_ifindex != opt_port_ifindex) continue; opt_found = true; - ctx.info = opt_inst_info; + ctx.port = opt_inst->port; switch (opt_type) { case TEAM_OPTION_TYPE_U32: ctx.data.u32_val = nla_get_u32(attr_data); @@ -1862,8 +1715,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) err = team_option_set(team, opt_inst, &ctx); if (err) goto team_put; - opt_inst->changed = true; - list_add(&opt_inst->tmp_list, &opt_inst_list); } if (!opt_found) { err = -ENOENT; @@ -1871,8 +1722,6 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info) } } - err = team_nl_send_event_options_get(team, &opt_inst_list); - team_put: team_nl_team_put(team); @@ -1897,7 +1746,7 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb, goto nla_put_failure; port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT); if (!port_list) - goto nla_put_failure; + return -EMSGSIZE; list_for_each_entry(port, &team->port_list, list) { struct nlattr *port_item; @@ -1989,18 +1838,27 @@ static struct genl_multicast_group team_change_event_mcgrp = { .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, }; -static int team_nl_send_multicast(struct sk_buff *skb, - struct team *team, u32 pid) +static int team_nl_send_event_options_get(struct team *team) { - return genlmsg_multicast_netns(dev_net(team->dev), skb, 0, - team_change_event_mcgrp.id, GFP_KERNEL); -} + struct sk_buff *skb; + int err; + struct net *net = dev_net(team->dev); -static int team_nl_send_event_options_get(struct team *team, - struct list_head *sel_opt_inst_list) -{ - return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast, - sel_opt_inst_list); + skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + err = team_nl_fill_options_get(skb, 0, 0, 0, team, false); + if (err < 0) + goto err_fill; + + err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id, + GFP_KERNEL); + return err; + +err_fill: + nlmsg_free(skb); + return err; } static int team_nl_send_event_port_list_get(struct team *team) @@ -2060,17 +1918,10 @@ static void team_nl_fini(void) static void __team_options_change_check(struct team *team) { int err; - struct team_option_inst *opt_inst; - LIST_HEAD(sel_opt_inst_list); - list_for_each_entry(opt_inst, &team->option_inst_list, list) { - if (opt_inst->changed) - list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list); - } - err = team_nl_send_event_options_get(team, &sel_opt_inst_list); + err = 
team_nl_send_event_options_get(team); if (err) - netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n", - err); + netdev_warn(team->dev, "Failed to send options change via netlink\n"); } /* rtnl lock is held */ @@ -2114,7 +1965,6 @@ static void team_port_change_check(struct team_port *port, bool linkup) mutex_unlock(&team->lock); } - /************************************ * Net device notifier event handler ************************************/ diff --git a/trunk/drivers/net/team/team_mode_activebackup.c b/trunk/drivers/net/team/team_mode_activebackup.c index 253b8a5f3427..fd6bd03aaa89 100644 --- a/trunk/drivers/net/team/team_mode_activebackup.c +++ b/trunk/drivers/net/team/team_mode_activebackup.c @@ -1,5 +1,5 @@ /* - * drivers/net/team/team_mode_activebackup.c - Active-backup mode for team + * net/drivers/team/team_mode_activebackup.c - Active-backup mode for team * Copyright (c) 2011 Jiri Pirko * * This program is free software; you can redistribute it and/or modify @@ -40,7 +40,7 @@ static bool ab_transmit(struct team *team, struct sk_buff *skb) { struct team_port *active_port; - active_port = rcu_dereference_bh(ab_priv(team)->active_port); + active_port = rcu_dereference(ab_priv(team)->active_port); if (unlikely(!active_port)) goto drop; skb->dev = active_port->dev; @@ -61,12 +61,8 @@ static void ab_port_leave(struct team *team, struct team_port *port) static int ab_active_port_get(struct team *team, struct team_gsetter_ctx *ctx) { - struct team_port *active_port; - - active_port = rcu_dereference_protected(ab_priv(team)->active_port, - lockdep_is_held(&team->lock)); - if (active_port) - ctx->data.u32_val = active_port->dev->ifindex; + if (ab_priv(team)->active_port) + ctx->data.u32_val = ab_priv(team)->active_port->dev->ifindex; else ctx->data.u32_val = 0; return 0; @@ -112,7 +108,7 @@ static const struct team_mode_ops ab_mode_ops = { .port_leave = ab_port_leave, }; -static const struct team_mode ab_mode = { +static struct team_mode ab_mode = { .kind = "activebackup", .owner = THIS_MODULE, .priv_size = sizeof(struct ab_priv), diff --git a/trunk/drivers/net/team/team_mode_loadbalance.c b/trunk/drivers/net/team/team_mode_loadbalance.c index c92fa02d6a63..86e8183c8e3d 100644 --- a/trunk/drivers/net/team/team_mode_loadbalance.c +++ b/trunk/drivers/net/team/team_mode_loadbalance.c @@ -17,210 +17,34 @@ #include #include -struct lb_priv; - -typedef struct team_port *lb_select_tx_port_func_t(struct team *, - struct lb_priv *, - struct sk_buff *, - unsigned char); - -#define LB_TX_HASHTABLE_SIZE 256 /* hash is a char */ - -struct lb_stats { - u64 tx_bytes; -}; - -struct lb_pcpu_stats { - struct lb_stats hash_stats[LB_TX_HASHTABLE_SIZE]; - struct u64_stats_sync syncp; -}; - -struct lb_stats_info { - struct lb_stats stats; - struct lb_stats last_stats; - struct team_option_inst_info *opt_inst_info; -}; - -struct lb_port_mapping { - struct team_port __rcu *port; - struct team_option_inst_info *opt_inst_info; -}; - -struct lb_priv_ex { - struct team *team; - struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE]; - struct sock_fprog *orig_fprog; - struct { - unsigned int refresh_interval; /* in tenths of second */ - struct delayed_work refresh_dw; - struct lb_stats_info info[LB_TX_HASHTABLE_SIZE]; - } stats; -}; - struct lb_priv { struct sk_filter __rcu *fp; - lb_select_tx_port_func_t __rcu *select_tx_port_func; - struct lb_pcpu_stats __percpu *pcpu_stats; - struct lb_priv_ex *ex; /* priv extension */ + struct sock_fprog *orig_fprog; }; -static struct lb_priv 
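/*
 * Sketch (assumed, not shown in this hunk): the writer side that pairs with
 * the rcu_dereference() in ab_transmit() above.  The option setter runs under
 * team->lock, so publishing the new port with rcu_assign_pointer() is enough
 * for the lockless transmit path to start using it.
 */
static int ab_active_port_set(struct team *team, struct team_gsetter_ctx *ctx)
{
	struct team_port *port;

	list_for_each_entry(port, &team->port_list, list) {
		if (port->dev->ifindex == ctx->data.u32_val) {
			rcu_assign_pointer(ab_priv(team)->active_port, port);
			return 0;
		}
	}
	return -ENOENT;
}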
*get_lb_priv(struct team *team) +static struct lb_priv *lb_priv(struct team *team) { return (struct lb_priv *) &team->mode_priv; } -struct lb_port_priv { - struct lb_stats __percpu *pcpu_stats; - struct lb_stats_info stats_info; -}; - -static struct lb_port_priv *get_lb_port_priv(struct team_port *port) -{ - return (struct lb_port_priv *) &port->mode_priv; -} - -#define LB_HTPM_PORT_BY_HASH(lp_priv, hash) \ - (lb_priv)->ex->tx_hash_to_port_mapping[hash].port - -#define LB_HTPM_OPT_INST_INFO_BY_HASH(lp_priv, hash) \ - (lb_priv)->ex->tx_hash_to_port_mapping[hash].opt_inst_info - -static void lb_tx_hash_to_port_mapping_null_port(struct team *team, - struct team_port *port) -{ - struct lb_priv *lb_priv = get_lb_priv(team); - bool changed = false; - int i; - - for (i = 0; i < LB_TX_HASHTABLE_SIZE; i++) { - struct lb_port_mapping *pm; - - pm = &lb_priv->ex->tx_hash_to_port_mapping[i]; - if (rcu_access_pointer(pm->port) == port) { - RCU_INIT_POINTER(pm->port, NULL); - team_option_inst_set_change(pm->opt_inst_info); - changed = true; - } - } - if (changed) - team_options_change_check(team); -} - -/* Basic tx selection based solely by hash */ -static struct team_port *lb_hash_select_tx_port(struct team *team, - struct lb_priv *lb_priv, - struct sk_buff *skb, - unsigned char hash) -{ - int port_index; - - port_index = hash % team->en_port_count; - return team_get_port_by_index_rcu(team, port_index); -} - -/* Hash to port mapping select tx port */ -static struct team_port *lb_htpm_select_tx_port(struct team *team, - struct lb_priv *lb_priv, - struct sk_buff *skb, - unsigned char hash) -{ - return rcu_dereference_bh(LB_HTPM_PORT_BY_HASH(lb_priv, hash)); -} - -struct lb_select_tx_port { - char *name; - lb_select_tx_port_func_t *func; -}; - -static const struct lb_select_tx_port lb_select_tx_port_list[] = { - { - .name = "hash", - .func = lb_hash_select_tx_port, - }, - { - .name = "hash_to_port_mapping", - .func = lb_htpm_select_tx_port, - }, -}; -#define LB_SELECT_TX_PORT_LIST_COUNT ARRAY_SIZE(lb_select_tx_port_list) - -static char *lb_select_tx_port_get_name(lb_select_tx_port_func_t *func) -{ - int i; - - for (i = 0; i < LB_SELECT_TX_PORT_LIST_COUNT; i++) { - const struct lb_select_tx_port *item; - - item = &lb_select_tx_port_list[i]; - if (item->func == func) - return item->name; - } - return NULL; -} - -static lb_select_tx_port_func_t *lb_select_tx_port_get_func(const char *name) -{ - int i; - - for (i = 0; i < LB_SELECT_TX_PORT_LIST_COUNT; i++) { - const struct lb_select_tx_port *item; - - item = &lb_select_tx_port_list[i]; - if (!strcmp(item->name, name)) - return item->func; - } - return NULL; -} - -static unsigned int lb_get_skb_hash(struct lb_priv *lb_priv, - struct sk_buff *skb) -{ - struct sk_filter *fp; - uint32_t lhash; - unsigned char *c; - - fp = rcu_dereference_bh(lb_priv->fp); - if (unlikely(!fp)) - return 0; - lhash = SK_RUN_FILTER(fp, skb); - c = (char *) &lhash; - return c[0] ^ c[1] ^ c[2] ^ c[3]; -} - -static void lb_update_tx_stats(unsigned int tx_bytes, struct lb_priv *lb_priv, - struct lb_port_priv *lb_port_priv, - unsigned char hash) -{ - struct lb_pcpu_stats *pcpu_stats; - struct lb_stats *port_stats; - struct lb_stats *hash_stats; - - pcpu_stats = this_cpu_ptr(lb_priv->pcpu_stats); - port_stats = this_cpu_ptr(lb_port_priv->pcpu_stats); - hash_stats = &pcpu_stats->hash_stats[hash]; - u64_stats_update_begin(&pcpu_stats->syncp); - port_stats->tx_bytes += tx_bytes; - hash_stats->tx_bytes += tx_bytes; - u64_stats_update_end(&pcpu_stats->syncp); -} - static bool lb_transmit(struct 
team *team, struct sk_buff *skb) { - struct lb_priv *lb_priv = get_lb_priv(team); - lb_select_tx_port_func_t *select_tx_port_func; + struct sk_filter *fp; struct team_port *port; - unsigned char hash; - unsigned int tx_bytes = skb->len; + unsigned int hash; + int port_index; - hash = lb_get_skb_hash(lb_priv, skb); - select_tx_port_func = rcu_dereference_bh(lb_priv->select_tx_port_func); - port = select_tx_port_func(team, lb_priv, skb, hash); + fp = rcu_dereference(lb_priv(team)->fp); + if (unlikely(!fp)) + goto drop; + hash = SK_RUN_FILTER(fp, skb); + port_index = hash % team->en_port_count; + port = team_get_port_by_index_rcu(team, port_index); if (unlikely(!port)) goto drop; skb->dev = port->dev; if (dev_queue_xmit(skb)) return false; - lb_update_tx_stats(tx_bytes, lb_priv, get_lb_port_priv(port), hash); return true; drop: @@ -230,16 +54,14 @@ static bool lb_transmit(struct team *team, struct sk_buff *skb) static int lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx) { - struct lb_priv *lb_priv = get_lb_priv(team); - - if (!lb_priv->ex->orig_fprog) { + if (!lb_priv(team)->orig_fprog) { ctx->data.bin_val.len = 0; ctx->data.bin_val.ptr = NULL; return 0; } - ctx->data.bin_val.len = lb_priv->ex->orig_fprog->len * + ctx->data.bin_val.len = lb_priv(team)->orig_fprog->len * sizeof(struct sock_filter); - ctx->data.bin_val.ptr = lb_priv->ex->orig_fprog->filter; + ctx->data.bin_val.ptr = lb_priv(team)->orig_fprog->filter; return 0; } @@ -272,9 +94,7 @@ static void __fprog_destroy(struct sock_fprog *fprog) static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx) { - struct lb_priv *lb_priv = get_lb_priv(team); struct sk_filter *fp = NULL; - struct sk_filter *orig_fp; struct sock_fprog *fprog = NULL; int err; @@ -290,237 +110,14 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx) } } - if (lb_priv->ex->orig_fprog) { + if (lb_priv(team)->orig_fprog) { /* Clear old filter data */ - __fprog_destroy(lb_priv->ex->orig_fprog); - orig_fp = rcu_dereference_protected(lb_priv->fp, - lockdep_is_held(&team->lock)); - sk_unattached_filter_destroy(orig_fp); + __fprog_destroy(lb_priv(team)->orig_fprog); + sk_unattached_filter_destroy(lb_priv(team)->fp); } - rcu_assign_pointer(lb_priv->fp, fp); - lb_priv->ex->orig_fprog = fprog; - return 0; -} - -static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx) -{ - struct lb_priv *lb_priv = get_lb_priv(team); - lb_select_tx_port_func_t *func; - char *name; - - func = rcu_dereference_protected(lb_priv->select_tx_port_func, - lockdep_is_held(&team->lock)); - name = lb_select_tx_port_get_name(func); - BUG_ON(!name); - ctx->data.str_val = name; - return 0; -} - -static int lb_tx_method_set(struct team *team, struct team_gsetter_ctx *ctx) -{ - struct lb_priv *lb_priv = get_lb_priv(team); - lb_select_tx_port_func_t *func; - - func = lb_select_tx_port_get_func(ctx->data.str_val); - if (!func) - return -EINVAL; - rcu_assign_pointer(lb_priv->select_tx_port_func, func); - return 0; -} - -static int lb_tx_hash_to_port_mapping_init(struct team *team, - struct team_option_inst_info *info) -{ - struct lb_priv *lb_priv = get_lb_priv(team); - unsigned char hash = info->array_index; - - LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv, hash) = info; - return 0; -} - -static int lb_tx_hash_to_port_mapping_get(struct team *team, - struct team_gsetter_ctx *ctx) -{ - struct lb_priv *lb_priv = get_lb_priv(team); - struct team_port *port; - unsigned char hash = ctx->info->array_index; - - port = LB_HTPM_PORT_BY_HASH(lb_priv, 
hash); - ctx->data.u32_val = port ? port->dev->ifindex : 0; - return 0; -} - -static int lb_tx_hash_to_port_mapping_set(struct team *team, - struct team_gsetter_ctx *ctx) -{ - struct lb_priv *lb_priv = get_lb_priv(team); - struct team_port *port; - unsigned char hash = ctx->info->array_index; - - list_for_each_entry(port, &team->port_list, list) { - if (ctx->data.u32_val == port->dev->ifindex) { - rcu_assign_pointer(LB_HTPM_PORT_BY_HASH(lb_priv, hash), - port); - return 0; - } - } - return -ENODEV; -} - -static int lb_hash_stats_init(struct team *team, - struct team_option_inst_info *info) -{ - struct lb_priv *lb_priv = get_lb_priv(team); - unsigned char hash = info->array_index; - - lb_priv->ex->stats.info[hash].opt_inst_info = info; - return 0; -} - -static int lb_hash_stats_get(struct team *team, struct team_gsetter_ctx *ctx) -{ - struct lb_priv *lb_priv = get_lb_priv(team); - unsigned char hash = ctx->info->array_index; - - ctx->data.bin_val.ptr = &lb_priv->ex->stats.info[hash].stats; - ctx->data.bin_val.len = sizeof(struct lb_stats); - return 0; -} - -static int lb_port_stats_init(struct team *team, - struct team_option_inst_info *info) -{ - struct team_port *port = info->port; - struct lb_port_priv *lb_port_priv = get_lb_port_priv(port); - - lb_port_priv->stats_info.opt_inst_info = info; - return 0; -} - -static int lb_port_stats_get(struct team *team, struct team_gsetter_ctx *ctx) -{ - struct team_port *port = ctx->info->port; - struct lb_port_priv *lb_port_priv = get_lb_port_priv(port); - - ctx->data.bin_val.ptr = &lb_port_priv->stats_info.stats; - ctx->data.bin_val.len = sizeof(struct lb_stats); - return 0; -} - -static void __lb_stats_info_refresh_prepare(struct lb_stats_info *s_info) -{ - memcpy(&s_info->last_stats, &s_info->stats, sizeof(struct lb_stats)); - memset(&s_info->stats, 0, sizeof(struct lb_stats)); -} - -static bool __lb_stats_info_refresh_check(struct lb_stats_info *s_info, - struct team *team) -{ - if (memcmp(&s_info->last_stats, &s_info->stats, - sizeof(struct lb_stats))) { - team_option_inst_set_change(s_info->opt_inst_info); - return true; - } - return false; -} - -static void __lb_one_cpu_stats_add(struct lb_stats *acc_stats, - struct lb_stats *cpu_stats, - struct u64_stats_sync *syncp) -{ - unsigned int start; - struct lb_stats tmp; - - do { - start = u64_stats_fetch_begin_bh(syncp); - tmp.tx_bytes = cpu_stats->tx_bytes; - } while (u64_stats_fetch_retry_bh(syncp, start)); - acc_stats->tx_bytes += tmp.tx_bytes; -} - -static void lb_stats_refresh(struct work_struct *work) -{ - struct team *team; - struct lb_priv *lb_priv; - struct lb_priv_ex *lb_priv_ex; - struct lb_pcpu_stats *pcpu_stats; - struct lb_stats *stats; - struct lb_stats_info *s_info; - struct team_port *port; - bool changed = false; - int i; - int j; - - lb_priv_ex = container_of(work, struct lb_priv_ex, - stats.refresh_dw.work); - - team = lb_priv_ex->team; - lb_priv = get_lb_priv(team); - - if (!mutex_trylock(&team->lock)) { - schedule_delayed_work(&lb_priv_ex->stats.refresh_dw, 0); - return; - } - - for (j = 0; j < LB_TX_HASHTABLE_SIZE; j++) { - s_info = &lb_priv->ex->stats.info[j]; - __lb_stats_info_refresh_prepare(s_info); - for_each_possible_cpu(i) { - pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i); - stats = &pcpu_stats->hash_stats[j]; - __lb_one_cpu_stats_add(&s_info->stats, stats, - &pcpu_stats->syncp); - } - changed |= __lb_stats_info_refresh_check(s_info, team); - } - - list_for_each_entry(port, &team->port_list, list) { - struct lb_port_priv *lb_port_priv = get_lb_port_priv(port); - - 
s_info = &lb_port_priv->stats_info; - __lb_stats_info_refresh_prepare(s_info); - for_each_possible_cpu(i) { - pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i); - stats = per_cpu_ptr(lb_port_priv->pcpu_stats, i); - __lb_one_cpu_stats_add(&s_info->stats, stats, - &pcpu_stats->syncp); - } - changed |= __lb_stats_info_refresh_check(s_info, team); - } - - if (changed) - team_options_change_check(team); - - schedule_delayed_work(&lb_priv_ex->stats.refresh_dw, - (lb_priv_ex->stats.refresh_interval * HZ) / 10); - - mutex_unlock(&team->lock); -} - -static int lb_stats_refresh_interval_get(struct team *team, - struct team_gsetter_ctx *ctx) -{ - struct lb_priv *lb_priv = get_lb_priv(team); - - ctx->data.u32_val = lb_priv->ex->stats.refresh_interval; - return 0; -} - -static int lb_stats_refresh_interval_set(struct team *team, - struct team_gsetter_ctx *ctx) -{ - struct lb_priv *lb_priv = get_lb_priv(team); - unsigned int interval; - - interval = ctx->data.u32_val; - if (lb_priv->ex->stats.refresh_interval == interval) - return 0; - lb_priv->ex->stats.refresh_interval = interval; - if (interval) - schedule_delayed_work(&lb_priv->ex->stats.refresh_dw, 0); - else - cancel_delayed_work(&lb_priv->ex->stats.refresh_dw); + rcu_assign_pointer(lb_priv(team)->fp, fp); + lb_priv(team)->orig_fprog = fprog; return 0; } @@ -531,125 +128,30 @@ static const struct team_option lb_options[] = { .getter = lb_bpf_func_get, .setter = lb_bpf_func_set, }, - { - .name = "lb_tx_method", - .type = TEAM_OPTION_TYPE_STRING, - .getter = lb_tx_method_get, - .setter = lb_tx_method_set, - }, - { - .name = "lb_tx_hash_to_port_mapping", - .array_size = LB_TX_HASHTABLE_SIZE, - .type = TEAM_OPTION_TYPE_U32, - .init = lb_tx_hash_to_port_mapping_init, - .getter = lb_tx_hash_to_port_mapping_get, - .setter = lb_tx_hash_to_port_mapping_set, - }, - { - .name = "lb_hash_stats", - .array_size = LB_TX_HASHTABLE_SIZE, - .type = TEAM_OPTION_TYPE_BINARY, - .init = lb_hash_stats_init, - .getter = lb_hash_stats_get, - }, - { - .name = "lb_port_stats", - .per_port = true, - .type = TEAM_OPTION_TYPE_BINARY, - .init = lb_port_stats_init, - .getter = lb_port_stats_get, - }, - { - .name = "lb_stats_refresh_interval", - .type = TEAM_OPTION_TYPE_U32, - .getter = lb_stats_refresh_interval_get, - .setter = lb_stats_refresh_interval_set, - }, }; static int lb_init(struct team *team) { - struct lb_priv *lb_priv = get_lb_priv(team); - lb_select_tx_port_func_t *func; - int err; - - /* set default tx port selector */ - func = lb_select_tx_port_get_func("hash"); - BUG_ON(!func); - rcu_assign_pointer(lb_priv->select_tx_port_func, func); - - lb_priv->ex = kzalloc(sizeof(*lb_priv->ex), GFP_KERNEL); - if (!lb_priv->ex) - return -ENOMEM; - lb_priv->ex->team = team; - - lb_priv->pcpu_stats = alloc_percpu(struct lb_pcpu_stats); - if (!lb_priv->pcpu_stats) { - err = -ENOMEM; - goto err_alloc_pcpu_stats; - } - - INIT_DELAYED_WORK(&lb_priv->ex->stats.refresh_dw, lb_stats_refresh); - - err = team_options_register(team, lb_options, ARRAY_SIZE(lb_options)); - if (err) - goto err_options_register; - return 0; - -err_options_register: - free_percpu(lb_priv->pcpu_stats); -err_alloc_pcpu_stats: - kfree(lb_priv->ex); - return err; + return team_options_register(team, lb_options, + ARRAY_SIZE(lb_options)); } static void lb_exit(struct team *team) { - struct lb_priv *lb_priv = get_lb_priv(team); - team_options_unregister(team, lb_options, ARRAY_SIZE(lb_options)); - cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw); - free_percpu(lb_priv->pcpu_stats); - kfree(lb_priv->ex); -} - 
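/*
 * Illustration (not part of the patch): the binary option served by
 * lb_bpf_func_get/set above carries a classic BPF program as an array of
 * struct sock_filter.  A deliberately trivial program that "hashes" on the
 * last octet of the destination MAC (field choice is only for demonstration)
 * would be built like this:
 */
struct sock_filter hash_insns[] = {
	BPF_STMT(BPF_LD  | BPF_B | BPF_ABS, 5),	/* A = byte at offset 5 of the frame */
	BPF_STMT(BPF_RET | BPF_A, 0),		/* return A as the hash value        */
};
struct sock_fprog hash_fprog = {
	.len	= ARRAY_SIZE(hash_insns),
	.filter	= hash_insns,
};
/*
 * The setter attaches it as an unattached socket filter (presumably via
 * sk_unattached_filter_create(), the counterpart of the destroy call visible
 * above), and lb_transmit() then maps SK_RUN_FILTER()'s result to a port
 * with hash % team->en_port_count.
 */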
-static int lb_port_enter(struct team *team, struct team_port *port) -{ - struct lb_port_priv *lb_port_priv = get_lb_port_priv(port); - - lb_port_priv->pcpu_stats = alloc_percpu(struct lb_stats); - if (!lb_port_priv->pcpu_stats) - return -ENOMEM; - return 0; -} - -static void lb_port_leave(struct team *team, struct team_port *port) -{ - struct lb_port_priv *lb_port_priv = get_lb_port_priv(port); - - free_percpu(lb_port_priv->pcpu_stats); -} - -static void lb_port_disabled(struct team *team, struct team_port *port) -{ - lb_tx_hash_to_port_mapping_null_port(team, port); } static const struct team_mode_ops lb_mode_ops = { .init = lb_init, .exit = lb_exit, - .port_enter = lb_port_enter, - .port_leave = lb_port_leave, - .port_disabled = lb_port_disabled, .transmit = lb_transmit, }; -static const struct team_mode lb_mode = { +static struct team_mode lb_mode = { .kind = "loadbalance", .owner = THIS_MODULE, .priv_size = sizeof(struct lb_priv), - .port_priv_size = sizeof(struct lb_port_priv), .ops = &lb_mode_ops, }; diff --git a/trunk/drivers/net/team/team_mode_roundrobin.c b/trunk/drivers/net/team/team_mode_roundrobin.c index 52dd0ec9cd1f..6abfbdc96be5 100644 --- a/trunk/drivers/net/team/team_mode_roundrobin.c +++ b/trunk/drivers/net/team/team_mode_roundrobin.c @@ -1,5 +1,5 @@ /* - * drivers/net/team/team_mode_roundrobin.c - Round-robin mode for team + * net/drivers/team/team_mode_roundrobin.c - Round-robin mode for team * Copyright (c) 2011 Jiri Pirko * * This program is free software; you can redistribute it and/or modify @@ -81,7 +81,7 @@ static const struct team_mode_ops rr_mode_ops = { .port_change_mac = rr_port_change_mac, }; -static const struct team_mode rr_mode = { +static struct team_mode rr_mode = { .kind = "roundrobin", .owner = THIS_MODULE, .priv_size = sizeof(struct rr_priv), diff --git a/trunk/drivers/net/usb/qmi_wwan.c b/trunk/drivers/net/usb/qmi_wwan.c index f1e779135899..3b206786b5e7 100644 --- a/trunk/drivers/net/usb/qmi_wwan.c +++ b/trunk/drivers/net/usb/qmi_wwan.c @@ -1,10 +1,6 @@ /* * Copyright (c) 2012 Bjørn Mork * - * The probing code is heavily inspired by cdc_ether, which is: - * Copyright (C) 2003-2005 by David Brownell - * Copyright (C) 2006 by Ole Andre Vadla Ravnas (ActiveSync) - * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. @@ -19,7 +15,11 @@ #include #include -/* This driver supports wwan (3G/LTE/?) devices using a vendor +/* The name of the CDC Device Management driver */ +#define DM_DRIVER "cdc_wdm" + +/* + * This driver supports wwan (3G/LTE/?) devices using a vendor * specific management protocol called Qualcomm MSM Interface (QMI) - * in addition to the more common AT commands over serial interface * management @@ -31,99 +31,33 @@ * management protocol is used in place of the standard CDC * notifications NOTIFY_NETWORK_CONNECTION and NOTIFY_SPEED_CHANGE * - * Alternatively, control and data functions can be combined in a - * single USB interface. - * * Handling a protocol like QMI is out of the scope for any driver. - * It is exported as a character device using the cdc-wdm driver as - * a subdriver, enabling userspace applications ("modem managers") to - * handle it. + * It can be exported as a character device using the cdc-wdm driver, + * which will enable userspace applications ("modem managers") to + * handle it. This may be required to use the network interface + * provided by the driver. 
* * These devices may alternatively/additionally be configured using AT - * commands on a serial interface + * commands on any of the serial interfaces driven by the option driver + * + * This driver binds only to the data ("slave") interface to enable + * the cdc-wdm driver to bind to the control interface. It still + * parses the CDC functional descriptors on the control interface to + * a) verify that this is indeed a handled interface (CDC Union + * header lists it as slave) + * b) get MAC address and other ethernet config from the CDC Ethernet + * header + * c) enable user bind requests against the control interface, which + * is the common way to bind to CDC Ethernet Control Model type + * interfaces + * d) provide a hint to the user about which interface is the + * corresponding management interface */ -/* driver specific data */ -struct qmi_wwan_state { - struct usb_driver *subdriver; - atomic_t pmcount; - unsigned long unused; - struct usb_interface *control; - struct usb_interface *data; -}; - -/* using a counter to merge subdriver requests with our own into a combined state */ -static int qmi_wwan_manage_power(struct usbnet *dev, int on) -{ - struct qmi_wwan_state *info = (void *)&dev->data; - int rv = 0; - - dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(&info->pmcount), on); - - if ((on && atomic_add_return(1, &info->pmcount) == 1) || (!on && atomic_dec_and_test(&info->pmcount))) { - /* need autopm_get/put here to ensure the usbcore sees the new value */ - rv = usb_autopm_get_interface(dev->intf); - if (rv < 0) - goto err; - dev->intf->needs_remote_wakeup = on; - usb_autopm_put_interface(dev->intf); - } -err: - return rv; -} - -static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on) -{ - struct usbnet *dev = usb_get_intfdata(intf); - return qmi_wwan_manage_power(dev, on); -} - -/* collect all three endpoints and register subdriver */ -static int qmi_wwan_register_subdriver(struct usbnet *dev) -{ - int rv; - struct usb_driver *subdriver = NULL; - struct qmi_wwan_state *info = (void *)&dev->data; - - /* collect bulk endpoints */ - rv = usbnet_get_endpoints(dev, info->data); - if (rv < 0) - goto err; - - /* update status endpoint if separate control interface */ - if (info->control != info->data) - dev->status = &info->control->cur_altsetting->endpoint[0]; - - /* require interrupt endpoint for subdriver */ - if (!dev->status) { - rv = -EINVAL; - goto err; - } - - /* for subdriver power management */ - atomic_set(&info->pmcount, 0); - - /* register subdriver */ - subdriver = usb_cdc_wdm_register(info->control, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power); - if (IS_ERR(subdriver)) { - dev_err(&info->control->dev, "subdriver registration failed\n"); - rv = PTR_ERR(subdriver); - goto err; - } - - /* prevent usbnet from using status endpoint */ - dev->status = NULL; - - /* save subdriver struct for suspend/resume wrappers */ - info->subdriver = subdriver; - -err: - return rv; -} - static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) { int status = -1; + struct usb_interface *control = NULL; u8 *buf = intf->cur_altsetting->extra; int len = intf->cur_altsetting->extralen; struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc; @@ -131,14 +65,25 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) struct usb_cdc_ether_desc *cdc_ether = NULL; u32 required = 1 << USB_CDC_HEADER_TYPE | 1 << USB_CDC_UNION_TYPE; u32 found = 0; - struct usb_driver *driver = driver_of(intf); - 
struct qmi_wwan_state *info = (void *)&dev->data; + atomic_t *pmcount = (void *)&dev->data[1]; - BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data) < sizeof(struct qmi_wwan_state))); + atomic_set(pmcount, 0); - /* require a single interrupt status endpoint for subdriver */ - if (intf->cur_altsetting->desc.bNumEndpoints != 1) - goto err; + /* + * assume a data interface has no additional descriptors and + * that the control and data interface are numbered + * consecutively - this holds for the Huawei device at least + */ + if (len == 0 && desc->bInterfaceNumber > 0) { + control = usb_ifnum_to_if(dev->udev, desc->bInterfaceNumber - 1); + if (!control) + goto err; + + buf = control->cur_altsetting->extra; + len = control->cur_altsetting->extralen; + dev_dbg(&intf->dev, "guessing \"control\" => %s, \"data\" => this\n", + dev_name(&control->dev)); + } while (len > 3) { struct usb_descriptor_header *h = (void *)buf; @@ -202,17 +147,10 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) goto err; } - /* verify CDC Union */ - if (desc->bInterfaceNumber != cdc_union->bMasterInterface0) { - dev_err(&intf->dev, "bogus CDC Union: master=%u\n", cdc_union->bMasterInterface0); - goto err; - } - - /* need to save these for unbind */ - info->control = intf; - info->data = usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0); - if (!info->data) { - dev_err(&intf->dev, "bogus CDC Union: slave=%u\n", cdc_union->bSlaveInterface0); + /* give the user a helpful hint if trying to bind to the wrong interface */ + if (cdc_union && desc->bInterfaceNumber == cdc_union->bMasterInterface0) { + dev_err(&intf->dev, "leaving \"control\" interface for " DM_DRIVER " - try binding to %s instead!\n", + dev_name(&usb_ifnum_to_if(dev->udev, cdc_union->bSlaveInterface0)->dev)); goto err; } @@ -222,29 +160,59 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf) usbnet_get_ethernet_addr(dev, cdc_ether->iMACAddress); } - /* claim data interface and set it up */ - status = usb_driver_claim_interface(driver, info->data, dev); - if (status < 0) - goto err; + /* success! point the user to the management interface */ + if (control) + dev_info(&intf->dev, "Use \"" DM_DRIVER "\" for QMI interface %s\n", + dev_name(&control->dev)); - status = qmi_wwan_register_subdriver(dev); - if (status < 0) { - usb_set_intfdata(info->data, NULL); - usb_driver_release_interface(driver, info->data); - } + /* XXX: add a sysfs symlink somewhere to help management applications find it? 
*/ + + /* collect bulk endpoints now that we know intf == "data" interface */ + status = usbnet_get_endpoints(dev, intf); err: return status; } +/* using a counter to merge subdriver requests with our own into a combined state */ +static int qmi_wwan_manage_power(struct usbnet *dev, int on) +{ + atomic_t *pmcount = (void *)&dev->data[1]; + int rv = 0; + + dev_dbg(&dev->intf->dev, "%s() pmcount=%d, on=%d\n", __func__, atomic_read(pmcount), on); + + if ((on && atomic_add_return(1, pmcount) == 1) || (!on && atomic_dec_and_test(pmcount))) { + /* need autopm_get/put here to ensure the usbcore sees the new value */ + rv = usb_autopm_get_interface(dev->intf); + if (rv < 0) + goto err; + dev->intf->needs_remote_wakeup = on; + usb_autopm_put_interface(dev->intf); + } +err: + return rv; +} + +static int qmi_wwan_cdc_wdm_manage_power(struct usb_interface *intf, int on) +{ + struct usbnet *dev = usb_get_intfdata(intf); + return qmi_wwan_manage_power(dev, on); +} + /* Some devices combine the "control" and "data" functions into a * single interface with all three endpoints: interrupt + bulk in and * out - */ + * + * Setting up cdc-wdm as a subdriver owning the interrupt endpoint + * will let it provide userspace access to the encapsulated QMI + * protocol without interfering with the usbnet operations. + */ static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf) { int rv; - struct qmi_wwan_state *info = (void *)&dev->data; + struct usb_driver *subdriver = NULL; + atomic_t *pmcount = (void *)&dev->data[1]; /* ZTE makes devices where the interface descriptors and endpoint * configurations of two or more interfaces are identical, even @@ -260,10 +228,30 @@ static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf) goto err; } - /* control and data is shared */ - info->control = intf; - info->data = intf; - rv = qmi_wwan_register_subdriver(dev); + atomic_set(pmcount, 0); + + /* collect all three endpoints */ + rv = usbnet_get_endpoints(dev, intf); + if (rv < 0) + goto err; + + /* require interrupt endpoint for subdriver */ + if (!dev->status) { + rv = -EINVAL; + goto err; + } + + subdriver = usb_cdc_wdm_register(intf, &dev->status->desc, 512, &qmi_wwan_cdc_wdm_manage_power); + if (IS_ERR(subdriver)) { + rv = PTR_ERR(subdriver); + goto err; + } + + /* can't let usbnet use the interrupt endpoint */ + dev->status = NULL; + + /* save subdriver struct for suspend/resume wrappers */ + dev->data[0] = (unsigned long)subdriver; err: return rv; @@ -292,30 +280,14 @@ static int qmi_wwan_bind_gobi(struct usbnet *dev, struct usb_interface *intf) return rv; } -static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf) +static void qmi_wwan_unbind_shared(struct usbnet *dev, struct usb_interface *intf) { - struct qmi_wwan_state *info = (void *)&dev->data; - struct usb_driver *driver = driver_of(intf); - struct usb_interface *other; - - if (info->subdriver && info->subdriver->disconnect) - info->subdriver->disconnect(info->control); - - /* allow user to unbind using either control or data */ - if (intf == info->control) - other = info->data; - else - other = info->control; - - /* only if not shared */ - if (other && intf != other) { - usb_set_intfdata(other, NULL); - usb_driver_release_interface(driver, other); - } + struct usb_driver *subdriver = (void *)dev->data[0]; - info->subdriver = NULL; - info->data = NULL; - info->control = NULL; + if (subdriver && subdriver->disconnect) + subdriver->disconnect(intf); + + dev->data[0] = (unsigned long)NULL; } /* 
suspend/resume wrappers calling both usbnet and the cdc-wdm @@ -327,15 +299,15 @@ static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf) static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message) { struct usbnet *dev = usb_get_intfdata(intf); - struct qmi_wwan_state *info = (void *)&dev->data; + struct usb_driver *subdriver = (void *)dev->data[0]; int ret; ret = usbnet_suspend(intf, message); if (ret < 0) goto err; - if (info->subdriver && info->subdriver->suspend) - ret = info->subdriver->suspend(intf, message); + if (subdriver && subdriver->suspend) + ret = subdriver->suspend(intf, message); if (ret < 0) usbnet_resume(intf); err: @@ -345,33 +317,33 @@ static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message) static int qmi_wwan_resume(struct usb_interface *intf) { struct usbnet *dev = usb_get_intfdata(intf); - struct qmi_wwan_state *info = (void *)&dev->data; + struct usb_driver *subdriver = (void *)dev->data[0]; int ret = 0; - if (info->subdriver && info->subdriver->resume) - ret = info->subdriver->resume(intf); + if (subdriver && subdriver->resume) + ret = subdriver->resume(intf); if (ret < 0) goto err; ret = usbnet_resume(intf); - if (ret < 0 && info->subdriver && info->subdriver->resume && info->subdriver->suspend) - info->subdriver->suspend(intf, PMSG_SUSPEND); + if (ret < 0 && subdriver && subdriver->resume && subdriver->suspend) + subdriver->suspend(intf, PMSG_SUSPEND); err: return ret; } + static const struct driver_info qmi_wwan_info = { - .description = "WWAN/QMI device", + .description = "QMI speaking wwan device", .flags = FLAG_WWAN, .bind = qmi_wwan_bind, - .unbind = qmi_wwan_unbind, .manage_power = qmi_wwan_manage_power, }; static const struct driver_info qmi_wwan_shared = { - .description = "WWAN/QMI device", + .description = "QMI speaking wwan device with combined interface", .flags = FLAG_WWAN, .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, + .unbind = qmi_wwan_unbind_shared, .manage_power = qmi_wwan_manage_power, }; @@ -379,7 +351,7 @@ static const struct driver_info qmi_wwan_gobi = { .description = "Qualcomm Gobi wwan/QMI device", .flags = FLAG_WWAN, .bind = qmi_wwan_bind_gobi, - .unbind = qmi_wwan_unbind, + .unbind = qmi_wwan_unbind_shared, .manage_power = qmi_wwan_manage_power, }; @@ -388,7 +360,7 @@ static const struct driver_info qmi_wwan_force_int1 = { .description = "Qualcomm WWAN/QMI device", .flags = FLAG_WWAN, .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, + .unbind = qmi_wwan_unbind_shared, .manage_power = qmi_wwan_manage_power, .data = BIT(1), /* interface whitelist bitmap */ }; @@ -397,7 +369,7 @@ static const struct driver_info qmi_wwan_force_int4 = { .description = "Qualcomm WWAN/QMI device", .flags = FLAG_WWAN, .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, + .unbind = qmi_wwan_unbind_shared, .manage_power = qmi_wwan_manage_power, .data = BIT(4), /* interface whitelist bitmap */ }; @@ -419,7 +391,7 @@ static const struct driver_info qmi_wwan_sierra = { .description = "Sierra Wireless wwan/QMI device", .flags = FLAG_WWAN, .bind = qmi_wwan_bind_gobi, - .unbind = qmi_wwan_unbind, + .unbind = qmi_wwan_unbind_shared, .manage_power = qmi_wwan_manage_power, .data = BIT(8) | BIT(19), /* interface whitelist bitmap */ }; @@ -435,7 +407,7 @@ static const struct usb_device_id products[] = { .idVendor = HUAWEI_VENDOR_ID, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, - .bInterfaceProtocol = 9, /* CDC Ethernet *control* interface */ + .bInterfaceProtocol 
= 8, /* NOTE: This is the *slave* interface of the CDC Union! */ .driver_info = (unsigned long)&qmi_wwan_info, }, { /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */ @@ -443,7 +415,7 @@ static const struct usb_device_id products[] = { .idVendor = HUAWEI_VENDOR_ID, .bInterfaceClass = USB_CLASS_VENDOR_SPEC, .bInterfaceSubClass = 1, - .bInterfaceProtocol = 57, /* CDC Ethernet *control* interface */ + .bInterfaceProtocol = 56, /* NOTE: This is the *slave* interface of the CDC Union! */ .driver_info = (unsigned long)&qmi_wwan_info, }, { /* Huawei E392, E398 and possibly others in "Windows mode" @@ -593,7 +565,17 @@ static struct usb_driver qmi_wwan_driver = { .disable_hub_initiated_lpm = 1, }; -module_usb_driver(qmi_wwan_driver); +static int __init qmi_wwan_init(void) +{ + return usb_register(&qmi_wwan_driver); +} +module_init(qmi_wwan_init); + +static void __exit qmi_wwan_exit(void) +{ + usb_deregister(&qmi_wwan_driver); +} +module_exit(qmi_wwan_exit); MODULE_AUTHOR("Bjørn Mork "); MODULE_DESCRIPTION("Qualcomm MSM Interface (QMI) WWAN driver"); diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/trunk/drivers/net/wireless/ath/ath9k/ar9003_mac.c index 78816b8b2173..d9e0824af093 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -181,14 +181,11 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) u32 mask2 = 0; struct ath9k_hw_capabilities *pCap = &ah->caps; struct ath_common *common = ath9k_hw_common(ah); - u32 sync_cause = 0, async_cause, async_mask = AR_INTR_MAC_IRQ; - - if (ath9k_hw_mci_is_enabled(ah)) - async_mask |= AR_INTR_ASYNC_MASK_MCI; + u32 sync_cause = 0, async_cause; async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE); - if (async_cause & async_mask) { + if (async_cause & (AR_INTR_MAC_IRQ | AR_INTR_ASYNC_MASK_MCI)) { if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) == AR_RTC_STATUS_ON) isr = REG_READ(ah, AR_ISR); diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.c index cc2853ade8f8..b1ced2a76da3 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.c @@ -321,7 +321,7 @@ void ar9003_mci_set_full_sleep(struct ath_hw *ah) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - if (ar9003_mci_state(ah, MCI_STATE_ENABLE) && + if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) && (mci->bt_state != MCI_BT_SLEEP) && !mci->halted_bt_gpm) { ar9003_mci_send_coex_halt_bt_gpm(ah, true, true); @@ -484,7 +484,7 @@ static void ar9003_mci_sync_bt_state(struct ath_hw *ah) struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; u32 cur_bt_state; - cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP); + cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL); if (mci->bt_state != cur_bt_state) mci->bt_state = cur_bt_state; @@ -593,7 +593,8 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type, if (!time_out) break; - offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data); + offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET, + &more_data); if (offset == MCI_GPM_INVALID) continue; @@ -657,7 +658,8 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type, time_out = 0; while (more_data == MCI_GPM_MORE) { - offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data); + offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET, + &more_data); if (offset == MCI_GPM_INVALID) break; @@ -891,16 +893,13 @@ void 
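/*
 * Illustration (not part of the patch): how an additional device would
 * typically be wired into products[].  The VID/PID below are made up; the
 * entry simply binds matching vendor-specific interfaces to one of the
 * driver_info blocks above, with the .data bitmap (where present) acting as
 * the interface whitelist for the shared control+data bind.
 */
{	/* hypothetical modem exposing QMI on interface 4 only */
	USB_DEVICE_AND_INTERFACE_INFO(0x1234, 0x5678,		/* made-up ids */
				      USB_CLASS_VENDOR_SPEC, 0xff, 0xff),
	.driver_info = (unsigned long)&qmi_wwan_force_int4,
},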
ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, udelay(100); } - /* Check pending GPM msg before MCI Reset Rx */ - ar9003_mci_check_gpm_offset(ah); - regval |= SM(1, AR_MCI_COMMAND2_RESET_RX); REG_WRITE(ah, AR_MCI_COMMAND2, regval); udelay(1); regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX); REG_WRITE(ah, AR_MCI_COMMAND2, regval); - ar9003_mci_get_next_gpm_offset(ah, true, NULL); + ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL); REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) | @@ -1011,32 +1010,38 @@ static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header, } } -void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force) +void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - if (!mci->update_2g5g && !force) + if (!mci->update_2g5g) return; if (mci->is_2g) { ar9003_mci_send_2g5g_status(ah, true); + ar9003_mci_send_lna_transfer(ah, true); + udelay(5); - REG_SET_BIT(ah, AR_MCI_TX_CTRL, + REG_CLR_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL, AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL); if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) - ar9003_mci_osla_setup(ah, true); + REG_SET_BIT(ah, AR_BTCOEX_CTRL, + AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); } else { + ar9003_mci_send_lna_take(ah, true); + udelay(5); + REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL); + REG_CLR_BIT(ah, AR_BTCOEX_CTRL, + AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); - ar9003_mci_osla_setup(ah, false); - if (!force) - ar9003_mci_send_2g5g_status(ah, true); + ar9003_mci_send_2g5g_status(ah, true); } } @@ -1164,10 +1169,11 @@ void ar9003_mci_cleanup(struct ath_hw *ah) } EXPORT_SYMBOL(ar9003_mci_cleanup); -u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type) +u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data) { + struct ath_common *common = ath9k_hw_common(ah); struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u32 value = 0; + u32 value = 0, more_gpm = 0, gpm_ptr; u8 query_type; switch (state_type) { @@ -1179,6 +1185,81 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type) value = 0; } value &= AR_BTCOEX_CTRL_MCI_MODE_EN; + break; + case MCI_STATE_INIT_GPM_OFFSET: + value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR); + mci->gpm_idx = value; + break; + case MCI_STATE_NEXT_GPM_OFFSET: + case MCI_STATE_LAST_GPM_OFFSET: + /* + * This could be useful to avoid new GPM message interrupt which + * may lead to spurious interrupt after power sleep, or multiple + * entry of ath_mci_intr(). 
+ * Adding empty GPM check by returning HAL_MCI_GPM_INVALID can + * alleviate this effect, but clearing GPM RX interrupt bit is + * safe, because whether this is called from hw or driver code + * there must be an interrupt bit set/triggered initially + */ + REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, + AR_MCI_INTERRUPT_RX_MSG_GPM); + + gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR); + value = gpm_ptr; + + if (value == 0) + value = mci->gpm_len - 1; + else if (value >= mci->gpm_len) { + if (value != 0xFFFF) + value = 0; + } else { + value--; + } + + if (value == 0xFFFF) { + value = MCI_GPM_INVALID; + more_gpm = MCI_GPM_NOMORE; + } else if (state_type == MCI_STATE_NEXT_GPM_OFFSET) { + if (gpm_ptr == mci->gpm_idx) { + value = MCI_GPM_INVALID; + more_gpm = MCI_GPM_NOMORE; + } else { + for (;;) { + u32 temp_index; + + /* skip reserved GPM if any */ + + if (value != mci->gpm_idx) + more_gpm = MCI_GPM_MORE; + else + more_gpm = MCI_GPM_NOMORE; + + temp_index = mci->gpm_idx; + mci->gpm_idx++; + + if (mci->gpm_idx >= + mci->gpm_len) + mci->gpm_idx = 0; + + if (ar9003_mci_is_gpm_valid(ah, + temp_index)) { + value = temp_index; + break; + } + + if (more_gpm == MCI_GPM_NOMORE) { + value = MCI_GPM_INVALID; + break; + } + } + } + if (p_data) + *p_data = more_gpm; + } + + if (value != MCI_GPM_INVALID) + value <<= 4; + break; case MCI_STATE_LAST_SCHD_MSG_OFFSET: value = MS(REG_READ(ah, AR_MCI_RX_STATUS), @@ -1191,6 +1272,21 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type) AR_MCI_RX_REMOTE_SLEEP) ? MCI_BT_SLEEP : MCI_BT_AWAKE; break; + case MCI_STATE_CONT_RSSI_POWER: + value = MS(mci->cont_status, AR_MCI_CONT_RSSI_POWER); + break; + case MCI_STATE_CONT_PRIORITY: + value = MS(mci->cont_status, AR_MCI_CONT_RRIORITY); + break; + case MCI_STATE_CONT_TXRX: + value = MS(mci->cont_status, AR_MCI_CONT_TXRX); + break; + case MCI_STATE_BT: + value = mci->bt_state; + break; + case MCI_STATE_SET_BT_SLEEP: + mci->bt_state = MCI_BT_SLEEP; + break; case MCI_STATE_SET_BT_AWAKE: mci->bt_state = MCI_BT_AWAKE; ar9003_mci_send_coex_version_query(ah, true); @@ -1199,7 +1295,7 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type) if (mci->unhalt_bt_gpm) ar9003_mci_send_coex_halt_bt_gpm(ah, false, true); - ar9003_mci_2g5g_switch(ah, false); + ar9003_mci_2g5g_switch(ah, true); break; case MCI_STATE_SET_BT_CAL_START: mci->bt_state = MCI_BT_CAL_START; @@ -1223,6 +1319,34 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type) case MCI_STATE_SEND_WLAN_COEX_VERSION: ar9003_mci_send_coex_version_response(ah, true); break; + case MCI_STATE_SET_BT_COEX_VERSION: + if (!p_data) + ath_dbg(common, MCI, + "MCI Set BT Coex version with NULL data!!\n"); + else { + mci->bt_ver_major = (*p_data >> 8) & 0xff; + mci->bt_ver_minor = (*p_data) & 0xff; + mci->bt_version_known = true; + ath_dbg(common, MCI, "MCI BT version set: %d.%d\n", + mci->bt_ver_major, mci->bt_ver_minor); + } + break; + case MCI_STATE_SEND_WLAN_CHANNELS: + if (p_data) { + if (((mci->wlan_channels[1] & 0xffff0000) == + (*(p_data + 1) & 0xffff0000)) && + (mci->wlan_channels[2] == *(p_data + 2)) && + (mci->wlan_channels[3] == *(p_data + 3))) + break; + + mci->wlan_channels[0] = *p_data++; + mci->wlan_channels[1] = *p_data++; + mci->wlan_channels[2] = *p_data++; + mci->wlan_channels[3] = *p_data++; + } + mci->wlan_channels_update = true; + ar9003_mci_send_coex_wlan_channels(ah, true); + break; case MCI_STATE_SEND_VERSION_QUERY: ar9003_mci_send_coex_version_query(ah, true); break; @@ -1230,12 +1354,29 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 
state_type) query_type = MCI_GPM_COEX_QUERY_BT_TOPOLOGY; ar9003_mci_send_coex_bt_status_query(ah, true, query_type); break; + case MCI_STATE_NEED_FLUSH_BT_INFO: + /* + * btcoex_hw.mci.unhalt_bt_gpm means whether it's + * needed to send UNHALT message. It's set whenever + * there's a request to send HALT message. + * mci_halted_bt_gpm means whether HALT message is sent + * out successfully. + * + * Checking (mci_unhalt_bt_gpm == false) instead of + * checking (ah->mci_halted_bt_gpm == false) will make + * sure currently is in UNHALT-ed mode and BT can + * respond to status query. + */ + value = (!mci->unhalt_bt_gpm && mci->need_flush_btinfo) ? 1 : 0; + if (p_data) + mci->need_flush_btinfo = (*p_data != 0) ? true : false; + break; case MCI_STATE_RECOVER_RX: ar9003_mci_prep_interface(ah); mci->query_bt = true; mci->need_flush_btinfo = true; ar9003_mci_send_coex_wlan_channels(ah, true); - ar9003_mci_2g5g_switch(ah, false); + ar9003_mci_2g5g_switch(ah, true); break; case MCI_STATE_NEED_FTP_STOMP: value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP); @@ -1263,154 +1404,3 @@ void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah) /* Force another 2g5g update at next scanning */ mci->update_2g5g = true; } - -void ar9003_mci_set_power_awake(struct ath_hw *ah) -{ - u32 btcoex_ctrl2, diag_sw; - int i; - u8 lna_ctrl, bt_sleep; - - for (i = 0; i < AH_WAIT_TIMEOUT; i++) { - btcoex_ctrl2 = REG_READ(ah, AR_BTCOEX_CTRL2); - if (btcoex_ctrl2 != 0xdeadbeef) - break; - udelay(AH_TIME_QUANTUM); - } - REG_WRITE(ah, AR_BTCOEX_CTRL2, (btcoex_ctrl2 | BIT(23))); - - for (i = 0; i < AH_WAIT_TIMEOUT; i++) { - diag_sw = REG_READ(ah, AR_DIAG_SW); - if (diag_sw != 0xdeadbeef) - break; - udelay(AH_TIME_QUANTUM); - } - REG_WRITE(ah, AR_DIAG_SW, (diag_sw | BIT(27) | BIT(19) | BIT(18))); - lna_ctrl = REG_READ(ah, AR_OBS_BUS_CTRL) & 0x3; - bt_sleep = REG_READ(ah, AR_MCI_RX_STATUS) & AR_MCI_RX_REMOTE_SLEEP; - - REG_WRITE(ah, AR_BTCOEX_CTRL2, btcoex_ctrl2); - REG_WRITE(ah, AR_DIAG_SW, diag_sw); - - if (bt_sleep && (lna_ctrl == 2)) { - REG_SET_BIT(ah, AR_BTCOEX_RC, 0x1); - REG_CLR_BIT(ah, AR_BTCOEX_RC, 0x1); - udelay(50); - } -} - -void ar9003_mci_check_gpm_offset(struct ath_hw *ah) -{ - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u32 offset; - - /* - * This should only be called before "MAC Warm Reset" or "MCI Reset Rx". - */ - offset = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR); - if (mci->gpm_idx == offset) - return; - ath_dbg(common, MCI, "GPM cached write pointer mismatch %d %d\n", - mci->gpm_idx, offset); - mci->query_bt = true; - mci->need_flush_btinfo = true; - mci->gpm_idx = 0; -} - -u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more) -{ - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u32 offset, more_gpm = 0, gpm_ptr; - - if (first) { - gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR); - mci->gpm_idx = gpm_ptr; - return gpm_ptr; - } - - /* - * This could be useful to avoid new GPM message interrupt which - * may lead to spurious interrupt after power sleep, or multiple - * entry of ath_mci_intr(). 
- * Adding empty GPM check by returning HAL_MCI_GPM_INVALID can - * alleviate this effect, but clearing GPM RX interrupt bit is - * safe, because whether this is called from hw or driver code - * there must be an interrupt bit set/triggered initially - */ - REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, - AR_MCI_INTERRUPT_RX_MSG_GPM); - - gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR); - offset = gpm_ptr; - - if (!offset) - offset = mci->gpm_len - 1; - else if (offset >= mci->gpm_len) { - if (offset != 0xFFFF) - offset = 0; - } else { - offset--; - } - - if ((offset == 0xFFFF) || (gpm_ptr == mci->gpm_idx)) { - offset = MCI_GPM_INVALID; - more_gpm = MCI_GPM_NOMORE; - goto out; - } - for (;;) { - u32 temp_index; - - /* skip reserved GPM if any */ - - if (offset != mci->gpm_idx) - more_gpm = MCI_GPM_MORE; - else - more_gpm = MCI_GPM_NOMORE; - - temp_index = mci->gpm_idx; - mci->gpm_idx++; - - if (mci->gpm_idx >= mci->gpm_len) - mci->gpm_idx = 0; - - if (ar9003_mci_is_gpm_valid(ah, temp_index)) { - offset = temp_index; - break; - } - - if (more_gpm == MCI_GPM_NOMORE) { - offset = MCI_GPM_INVALID; - break; - } - } - - if (offset != MCI_GPM_INVALID) - offset <<= 4; -out: - if (more) - *more = more_gpm; - - return offset; -} -EXPORT_SYMBOL(ar9003_mci_get_next_gpm_offset); - -void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor) -{ - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - - mci->bt_ver_major = major; - mci->bt_ver_minor = minor; - mci->bt_version_known = true; - ath_dbg(ath9k_hw_common(ah), MCI, "MCI BT version set: %d.%d\n", - mci->bt_ver_major, mci->bt_ver_minor); -} -EXPORT_SYMBOL(ar9003_mci_set_bt_version); - -void ar9003_mci_send_wlan_channels(struct ath_hw *ah) -{ - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - - mci->wlan_channels_update = true; - ar9003_mci_send_coex_wlan_channels(ah, true); -} -EXPORT_SYMBOL(ar9003_mci_send_wlan_channels); diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.h index d33b8e128855..10282e2bcdc9 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.h @@ -189,15 +189,26 @@ enum mci_bt_state { /* Type of state query */ enum mci_state_type { MCI_STATE_ENABLE, + MCI_STATE_INIT_GPM_OFFSET, + MCI_STATE_NEXT_GPM_OFFSET, + MCI_STATE_LAST_GPM_OFFSET, + MCI_STATE_BT, + MCI_STATE_SET_BT_SLEEP, MCI_STATE_SET_BT_AWAKE, MCI_STATE_SET_BT_CAL_START, MCI_STATE_SET_BT_CAL, MCI_STATE_LAST_SCHD_MSG_OFFSET, MCI_STATE_REMOTE_SLEEP, + MCI_STATE_CONT_RSSI_POWER, + MCI_STATE_CONT_PRIORITY, + MCI_STATE_CONT_TXRX, MCI_STATE_RESET_REQ_WAKE, MCI_STATE_SEND_WLAN_COEX_VERSION, + MCI_STATE_SET_BT_COEX_VERSION, + MCI_STATE_SEND_WLAN_CHANNELS, MCI_STATE_SEND_VERSION_QUERY, MCI_STATE_SEND_STATUS_QUERY, + MCI_STATE_NEED_FLUSH_BT_INFO, MCI_STATE_SET_CONCUR_TX_PRI, MCI_STATE_RECOVER_RX, MCI_STATE_NEED_FTP_STOMP, @@ -248,15 +259,14 @@ enum mci_gpm_coex_opcode { bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag, u32 *payload, u8 len, bool wait_done, bool check_bt); -u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type); +u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data); void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf, u16 len, u32 sched_addr); void ar9003_mci_cleanup(struct ath_hw *ah); void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr, u32 *rx_msg_intr); -u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more); -void 
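/*
 * Sketch (illustrative helper, not in the patch): the wrap handling shared by
 * both versions of the GPM-offset code above.  The hardware write pointer
 * names the next slot to be filled, so the last completed entry is one slot
 * back, with 0xFFFF meaning "nothing received yet" and out-of-range values
 * clamped; the caller then shifts the slot index left by 4 because each GPM
 * entry appears to be 16 bytes.
 */
static u32 mci_gpm_last_slot(u32 write_ptr, u32 gpm_len)
{
	if (write_ptr == 0xFFFF)
		return MCI_GPM_INVALID;		/* ring is still empty */
	if (write_ptr == 0)
		return gpm_len - 1;		/* pointer wrapped to the start */
	if (write_ptr >= gpm_len)
		return 0;			/* clamp a bogus pointer */
	return write_ptr - 1;			/* normal case: previous slot */
}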
ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor); -void ar9003_mci_send_wlan_channels(struct ath_hw *ah); +void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah); + /* * These functions are used by ath9k_hw. */ @@ -267,7 +277,7 @@ void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep); void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable); void ar9003_mci_init_cal_done(struct ath_hw *ah); void ar9003_mci_set_full_sleep(struct ath_hw *ah); -void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force); +void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done); void ar9003_mci_check_bt(struct ath_hw *ah); bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan); int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan, @@ -275,9 +285,6 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan, void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, bool is_full_sleep); void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked); -void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah); -void ar9003_mci_set_power_awake(struct ath_hw *ah); -void ar9003_mci_check_gpm_offset(struct ath_hw *ah); #else @@ -315,15 +322,6 @@ static inline void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, static inline void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked) { } -static inline void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah) -{ -} -static inline void ar9003_mci_set_power_awake(struct ath_hw *ah) -{ -} -static inline void ar9003_mci_check_gpm_offset(struct ath_hw *ah) -{ -} #endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */ #endif diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/trunk/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h index 8f406ff2c95e..4a93e1534c1d 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h @@ -52,7 +52,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = { {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020}, {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8}, {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e}, - {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x32395d5e}, + {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x33795d5e}, {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, diff --git a/trunk/drivers/net/wireless/ath/ath9k/ath9k.h b/trunk/drivers/net/wireless/ath/ath9k/ath9k.h index a8c050085648..02fc1c1e5eeb 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ath9k.h @@ -698,7 +698,6 @@ struct ath_softc { #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT struct ath_btcoex btcoex; struct ath_mci_coex mci_coex; - struct work_struct mci_work; #endif struct ath_descdma txsdma; diff --git a/trunk/drivers/net/wireless/ath/ath9k/gpio.c b/trunk/drivers/net/wireless/ath/ath9k/gpio.c index 26032cb59b8a..af6d27350291 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/gpio.c +++ b/trunk/drivers/net/wireless/ath/ath9k/gpio.c @@ -202,7 +202,7 @@ static void ath_btcoex_period_timer(unsigned long data) btcoex->bt_wait_time += btcoex->btcoex_period; if (btcoex->bt_wait_time > ATH_BTCOEX_RX_WAIT_TIME) { - if (ar9003_mci_state(ah, MCI_STATE_NEED_FTP_STOMP) && + if (ar9003_mci_state(ah, MCI_STATE_NEED_FTP_STOMP, NULL) && (mci->num_pan || mci->num_other_acl)) 
ah->btcoex_hw.mci.stomp_ftp = (sc->rx.num_pkts < ATH_BTCOEX_STOMP_FTP_THRESH); @@ -232,7 +232,7 @@ static void ath_btcoex_period_timer(unsigned long data) } ath9k_ps_restore(sc); - timer_period = btcoex->btcoex_period; + timer_period = btcoex->btcoex_period / 1000; mod_timer(&btcoex->period_timer, jiffies + msecs_to_jiffies(timer_period)); } @@ -267,10 +267,10 @@ static int ath_init_btcoex_timer(struct ath_softc *sc) { struct ath_btcoex *btcoex = &sc->btcoex; - btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD; - btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * 1000 * + btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000; + btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * btcoex->btcoex_period / 100; - btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) * 1000 * + btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) * btcoex->btcoex_period / 100; setup_timer(&btcoex->period_timer, ath_btcoex_period_timer, diff --git a/trunk/drivers/net/wireless/ath/ath9k/hw.c b/trunk/drivers/net/wireless/ath/ath9k/hw.c index 784baee5db84..45e670087e1c 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/hw.c +++ b/trunk/drivers/net/wireless/ath/ath9k/hw.c @@ -1348,9 +1348,6 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type) } } - if (ath9k_hw_mci_is_enabled(ah)) - ar9003_mci_check_gpm_offset(ah); - REG_WRITE(ah, AR_RTC_RC, rst_flags); REGWRITE_BUFFER_FLUSH(ah); @@ -1711,7 +1708,7 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan) ath9k_hw_start_nfcal(ah, true); if (ath9k_hw_mci_is_enabled(ah)) - ar9003_mci_2g5g_switch(ah, false); + ar9003_mci_2g5g_switch(ah, true); if (AR_SREV_9271(ah)) ar9002_hw_load_ani_reg(ah, chan); @@ -1915,8 +1912,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, ath9k_hw_set_dma(ah); - if (!ath9k_hw_mci_is_enabled(ah)) - REG_WRITE(ah, AR_OBS, 8); + REG_WRITE(ah, AR_OBS, 8); if (ah->config.rx_intr_mitigation) { REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500); @@ -2115,9 +2111,6 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah) AR_RTC_FORCE_WAKE_EN); udelay(50); - if (ath9k_hw_mci_is_enabled(ah)) - ar9003_mci_set_power_awake(ah); - for (i = POWER_UP_TIME / 50; i > 0; i--) { val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M; if (val == AR_RTC_STATUS_ON) diff --git a/trunk/drivers/net/wireless/ath/ath9k/link.c b/trunk/drivers/net/wireless/ath/ath9k/link.c index a105c9426251..0cc4c70f7f0c 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/link.c +++ b/trunk/drivers/net/wireless/ath/ath9k/link.c @@ -136,14 +136,6 @@ void ath_hw_pll_work(struct work_struct *work) u32 pll_sqsum; struct ath_softc *sc = container_of(work, struct ath_softc, hw_pll_work.work); - /* - * ensure that the PLL WAR is executed only - * after the STA is associated (or) if the - * beaconing had started in interfaces that - * uses beacons. 
- */ - if (!test_bit(SC_OP_BEACONS, &sc->sc_flags)) - return; ath9k_ps_wakeup(sc); pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah); diff --git a/trunk/drivers/net/wireless/ath/ath9k/main.c b/trunk/drivers/net/wireless/ath/ath9k/main.c index 52561b341d68..c0f478b0a9a2 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/main.c +++ b/trunk/drivers/net/wireless/ath/ath9k/main.c @@ -150,9 +150,6 @@ static void __ath_cancel_work(struct ath_softc *sc) cancel_work_sync(&sc->hw_check_work); cancel_delayed_work_sync(&sc->tx_complete_work); cancel_delayed_work_sync(&sc->hw_pll_work); -#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT - cancel_work_sync(&sc->mci_work); -#endif } static void ath_cancel_work(struct ath_softc *sc) @@ -1036,6 +1033,15 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, } } + if ((ah->opmode == NL80211_IFTYPE_ADHOC) || + ((vif->type == NL80211_IFTYPE_ADHOC) && + sc->nvifs > 0)) { + ath_err(common, "Cannot create ADHOC interface when other" + " interfaces already exist.\n"); + ret = -EINVAL; + goto out; + } + ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type); sc->nvifs++; @@ -1060,6 +1066,15 @@ static int ath9k_change_interface(struct ieee80211_hw *hw, mutex_lock(&sc->mutex); ath9k_ps_wakeup(sc); + /* See if new interface type is valid. */ + if ((new_type == NL80211_IFTYPE_ADHOC) && + (sc->nvifs > 1)) { + ath_err(common, "When using ADHOC, it must be the only" + " interface.\n"); + ret = -EINVAL; + goto out; + } + if (ath9k_uses_beacons(new_type) && !ath9k_uses_beacons(vif->type)) { if (sc->nbcnvifs >= ATH_BCBUF) { @@ -1243,7 +1258,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) { ath_err(common, "Unable to set channel\n"); mutex_unlock(&sc->mutex); - ath9k_ps_restore(sc); return -EINVAL; } diff --git a/trunk/drivers/net/wireless/ath/ath9k/mci.c b/trunk/drivers/net/wireless/ath/ath9k/mci.c index 7d34a504d617..49137f477b05 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/mci.c +++ b/trunk/drivers/net/wireless/ath/ath9k/mci.c @@ -20,7 +20,7 @@ #include "ath9k.h" #include "mci.h" -static const u8 ath_mci_duty_cycle[] = { 55, 50, 60, 70, 80, 85, 90, 95, 98 }; +static const u8 ath_mci_duty_cycle[] = { 0, 50, 60, 70, 80, 85, 90, 95, 98 }; static struct ath_mci_profile_info* ath_mci_find_profile(struct ath_mci_profile *mci, @@ -28,14 +28,11 @@ ath_mci_find_profile(struct ath_mci_profile *mci, { struct ath_mci_profile_info *entry; - if (list_empty(&mci->info)) - return NULL; - list_for_each_entry(entry, &mci->info, list) { if (entry->conn_handle == info->conn_handle) - return entry; + break; } - return NULL; + return entry; } static bool ath_mci_add_profile(struct ath_common *common, @@ -52,21 +49,31 @@ static bool ath_mci_add_profile(struct ath_common *common, (info->type != MCI_GPM_COEX_PROFILE_VOICE)) return false; - entry = kzalloc(sizeof(*entry), GFP_ATOMIC); - if (!entry) - return false; + entry = ath_mci_find_profile(mci, info); - memcpy(entry, info, 10); - INC_PROF(mci, info); - list_add_tail(&entry->list, &mci->info); + if (entry) { + memcpy(entry, info, 10); + } else { + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return false; + + memcpy(entry, info, 10); + INC_PROF(mci, info); + list_add_tail(&info->list, &mci->info); + } return true; } static void ath_mci_del_profile(struct ath_common *common, struct ath_mci_profile *mci, - struct ath_mci_profile_info *entry) + struct ath_mci_profile_info *info) { + struct ath_mci_profile_info *entry; + + entry = 
ath_mci_find_profile(mci, info); + if (!entry) return; @@ -79,16 +86,12 @@ void ath_mci_flush_profile(struct ath_mci_profile *mci) { struct ath_mci_profile_info *info, *tinfo; - mci->aggr_limit = 0; - - if (list_empty(&mci->info)) - return; - list_for_each_entry_safe(info, tinfo, &mci->info, list) { list_del(&info->list); DEC_PROF(mci, info); kfree(info); } + mci->aggr_limit = 0; } static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex) @@ -120,8 +123,6 @@ static void ath_mci_update_scheme(struct ath_softc *sc) if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING) goto skip_tuning; - btcoex->duty_cycle = ath_mci_duty_cycle[num_profile]; - if (num_profile == 1) { info = list_first_entry(&mci->info, struct ath_mci_profile_info, @@ -180,11 +181,12 @@ static void ath_mci_update_scheme(struct ath_softc *sc) if (IS_CHAN_5GHZ(sc->sc_ah->curchan)) return; - btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_BDR_DUTY_CYCLE : 0); + btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_MAX_DUTY_CYCLE : 0); if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE) btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE; - btcoex->btcoex_no_stomp = btcoex->btcoex_period * 1000 * + btcoex->btcoex_period *= 1000; + btcoex->btcoex_no_stomp = btcoex->btcoex_period * (100 - btcoex->duty_cycle) / 100; ath9k_hw_btcoex_enable(sc->sc_ah); @@ -195,16 +197,20 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) { struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci; u32 payload[4] = {0, 0, 0, 0}; switch (opcode) { case MCI_GPM_BT_CAL_REQ: - if (mci_hw->bt_state == MCI_BT_AWAKE) { - ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START); + if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) { + ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START, NULL); ieee80211_queue_work(sc->hw, &sc->hw_reset_work); + } else { + ath_dbg(common, MCI, "MCI State mismatch: %d\n", + ar9003_mci_state(ah, MCI_STATE_BT, NULL)); } - ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state); + break; + case MCI_GPM_BT_CAL_DONE: + ar9003_mci_state(ah, MCI_STATE_BT, NULL); break; case MCI_GPM_BT_CAL_GRANT: MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE); @@ -217,42 +223,32 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) } } -static void ath9k_mci_work(struct work_struct *work) -{ - struct ath_softc *sc = container_of(work, struct ath_softc, mci_work); - - ath_mci_update_scheme(sc); -} - static void ath_mci_process_profile(struct ath_softc *sc, struct ath_mci_profile_info *info) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_btcoex *btcoex = &sc->btcoex; struct ath_mci_profile *mci = &btcoex->mci; - struct ath_mci_profile_info *entry = NULL; - - entry = ath_mci_find_profile(mci, info); - if (entry) - memcpy(entry, info, 10); if (info->start) { - if (!entry && !ath_mci_add_profile(common, mci, info)) + if (!ath_mci_add_profile(common, mci, info)) return; } else - ath_mci_del_profile(common, mci, entry); + ath_mci_del_profile(common, mci, info); btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD; mci->aggr_limit = mci->num_sco ? 6 : 0; - btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)]; - if (NUM_PROF(mci)) + if (NUM_PROF(mci)) { btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW; - else + btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)]; + } else { btcoex->bt_stomp_type = mci->num_mgmt ? 
ATH_BTCOEX_STOMP_ALL : ATH_BTCOEX_STOMP_LOW; + btcoex->duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE; + } - ieee80211_queue_work(sc->hw, &sc->mci_work); + ath_mci_update_scheme(sc); } static void ath_mci_process_status(struct ath_softc *sc, @@ -267,6 +263,8 @@ static void ath_mci_process_status(struct ath_softc *sc, if (status->is_link) return; + memset(&info, 0, sizeof(struct ath_mci_profile_info)); + info.conn_handle = status->conn_handle; if (ath_mci_find_profile(mci, &info)) return; @@ -286,7 +284,7 @@ static void ath_mci_process_status(struct ath_softc *sc, } while (++i < ATH_MCI_MAX_PROFILE); if (old_num_mgmt != mci->num_mgmt) - ieee80211_queue_work(sc->hw, &sc->mci_work); + ath_mci_update_scheme(sc); } static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) @@ -295,20 +293,25 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) struct ath_mci_profile_info profile_info; struct ath_mci_profile_status profile_status; struct ath_common *common = ath9k_hw_common(sc->sc_ah); - u8 major, minor; + u32 version; + u8 major; + u8 minor; u32 seq_num; switch (opcode) { case MCI_GPM_COEX_VERSION_QUERY: - ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION); + version = ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION, + NULL); break; case MCI_GPM_COEX_VERSION_RESPONSE: major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION); minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION); - ar9003_mci_set_bt_version(ah, major, minor); + version = (major << 8) + minor; + version = ar9003_mci_state(ah, MCI_STATE_SET_BT_COEX_VERSION, + &version); break; case MCI_GPM_COEX_STATUS_QUERY: - ar9003_mci_send_wlan_channels(ah); + ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_CHANNELS, NULL); break; case MCI_GPM_COEX_BT_PROFILE_INFO: memcpy(&profile_info, @@ -375,7 +378,6 @@ int ath_mci_setup(struct ath_softc *sc) mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4), mci->sched_buf.bf_paddr); - INIT_WORK(&sc->mci_work, ath9k_mci_work); ath_dbg(common, MCI, "MCI Initialized\n"); return 0; @@ -403,7 +405,6 @@ void ath_mci_intr(struct ath_softc *sc) struct ath_mci_coex *mci = &sc->mci_coex; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci; u32 mci_int, mci_int_rxmsg; u32 offset, subtype, opcode; u32 *pgpm; @@ -412,8 +413,8 @@ void ath_mci_intr(struct ath_softc *sc) ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg); - if (ar9003_mci_state(ah, MCI_STATE_ENABLE) == 0) { - ar9003_mci_get_next_gpm_offset(ah, true, NULL); + if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) == 0) { + ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL); return; } @@ -432,41 +433,46 @@ void ath_mci_intr(struct ath_softc *sc) NULL, 0, true, false); mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE; - ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE); + ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE, NULL); /* * always do this for recovery and 2G/5G toggling and LNA_TRANS */ - ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE); + ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, NULL); } if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) { mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING; - if ((mci_hw->bt_state == MCI_BT_SLEEP) && - (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) != - MCI_BT_SLEEP)) - ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE); + if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_SLEEP) { + if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) != + MCI_BT_SLEEP) + ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, + 
NULL); + } } if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) { mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING; - if ((mci_hw->bt_state == MCI_BT_AWAKE) && - (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) != - MCI_BT_AWAKE)) - mci_hw->bt_state = MCI_BT_SLEEP; + if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) { + if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) != + MCI_BT_AWAKE) + ar9003_mci_state(ah, MCI_STATE_SET_BT_SLEEP, + NULL); + } } if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) || (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) { - ar9003_mci_state(ah, MCI_STATE_RECOVER_RX); + ar9003_mci_state(ah, MCI_STATE_RECOVER_RX, NULL); skip_gpm = true; } if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) { mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO; - offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET); + offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET, + NULL); } if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) { @@ -475,8 +481,8 @@ void ath_mci_intr(struct ath_softc *sc) while (more_data == MCI_GPM_MORE) { pgpm = mci->gpm_buf.bf_addr; - offset = ar9003_mci_get_next_gpm_offset(ah, false, - &more_data); + offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET, + &more_data); if (offset == MCI_GPM_INVALID) break; @@ -517,17 +523,23 @@ void ath_mci_intr(struct ath_softc *sc) mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO; if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) { - int value_dbm = MS(mci_hw->cont_status, - AR_MCI_CONT_RSSI_POWER); + int value_dbm = ar9003_mci_state(ah, + MCI_STATE_CONT_RSSI_POWER, NULL); mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO; - ath_dbg(common, MCI, - "MCI CONT_INFO: (%s) pri = %d pwr = %d dBm\n", - MS(mci_hw->cont_status, AR_MCI_CONT_TXRX) ? - "tx" : "rx", - MS(mci_hw->cont_status, AR_MCI_CONT_PRIORITY), - value_dbm); + if (ar9003_mci_state(ah, MCI_STATE_CONT_TXRX, NULL)) + ath_dbg(common, MCI, + "MCI CONT_INFO: (tx) pri = %d, pwr = %d dBm\n", + ar9003_mci_state(ah, + MCI_STATE_CONT_PRIORITY, NULL), + value_dbm); + else + ath_dbg(common, MCI, + "MCI CONT_INFO: (rx) pri = %d,pwr = %d dBm\n", + ar9003_mci_state(ah, + MCI_STATE_CONT_PRIORITY, NULL), + value_dbm); } if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK) diff --git a/trunk/drivers/net/wireless/ath/ath9k/rc.c b/trunk/drivers/net/wireless/ath/ath9k/rc.c index e034add9cd5a..92a6c0a87f89 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/rc.c +++ b/trunk/drivers/net/wireless/ath/ath9k/rc.c @@ -770,7 +770,7 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, struct ieee80211_tx_rate *rates = tx_info->control.rates; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; __le16 fc = hdr->frame_control; - u8 try_per_rate, i = 0, rix; + u8 try_per_rate, i = 0, rix, high_rix; int is_probe = 0; if (rate_control_send_low(sta, priv_sta, txrc)) @@ -791,6 +791,7 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, rate_table = ath_rc_priv->rate_table; rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe, false); + high_rix = rix; /* * If we're in HT mode and both us and our peer supports LDPC. @@ -838,16 +839,16 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, try_per_rate = 8; /* - * If the last rate in the rate series is MCS and has - * more than 80% of per thresh, then use a legacy rate - * as last retry to ensure that the frame is tried in both - * MCS and legacy rate. 
+ * Use a legacy rate as last retry to ensure that the frame + * is tried in both MCS and legacy rates. */ - ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix); - if (WLAN_RC_PHY_HT(rate_table->info[rix].phy) && - (ath_rc_priv->per[rix] > 45)) + if ((rates[2].flags & IEEE80211_TX_RC_MCS) && + (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU) || + (ath_rc_priv->per[high_rix] > 45))) rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe, true); + else + ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix); /* All other rates in the series have RTS enabled */ ath_rc_rate_set_series(rate_table, &rates[i], txrc, diff --git a/trunk/drivers/net/wireless/ath/ath9k/reg.h b/trunk/drivers/net/wireless/ath/ath9k/reg.h index 75acefbd4937..560d6effac7a 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/reg.h +++ b/trunk/drivers/net/wireless/ath/ath9k/reg.h @@ -2098,8 +2098,8 @@ enum { #define AR_MCI_CONT_STATUS 0x1848 #define AR_MCI_CONT_RSSI_POWER 0x000000FF #define AR_MCI_CONT_RSSI_POWER_S 0 -#define AR_MCI_CONT_PRIORITY 0x0000FF00 -#define AR_MCI_CONT_PRIORITY_S 8 +#define AR_MCI_CONT_RRIORITY 0x0000FF00 +#define AR_MCI_CONT_RRIORITY_S 8 #define AR_MCI_CONT_TXRX 0x00010000 #define AR_MCI_CONT_TXRX_S 16 diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/trunk/drivers/net/wireless/brcm80211/brcmfmac/Makefile index 9d5170b6df50..abb48032753b 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/Makefile +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/Makefile @@ -34,5 +34,3 @@ brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \ sdio_chip.o brcmfmac-$(CONFIG_BRCMFMAC_USB) += \ usb.o -brcmfmac-$(CONFIG_BRCMDBG) += \ - dhd_dbg.o \ No newline at end of file diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd.h index a11fe54f5950..9f637014486e 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd.h @@ -613,9 +613,6 @@ struct brcmf_pub { struct work_struct multicast_work; u8 macvalue[ETH_ALEN]; atomic_t pend_8021x_cnt; -#ifdef DEBUG - struct dentry *dbgfs_dir; -#endif }; struct brcmf_if_event { diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c deleted file mode 100644 index 7f89540b56da..000000000000 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright (c) 2012 Broadcom Corporation - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ -#include -#include -#include -#include -#include - -#include -#include -#include -#include "dhd.h" -#include "dhd_bus.h" -#include "dhd_dbg.h" - -static struct dentry *root_folder; - -void brcmf_debugfs_init(void) -{ - root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (IS_ERR(root_folder)) - root_folder = NULL; -} - -void brcmf_debugfs_exit(void) -{ - if (!root_folder) - return; - - debugfs_remove_recursive(root_folder); - root_folder = NULL; -} - -int brcmf_debugfs_attach(struct brcmf_pub *drvr) -{ - if (!root_folder) - return -ENODEV; - - drvr->dbgfs_dir = debugfs_create_dir(dev_name(drvr->dev), root_folder); - return PTR_RET(drvr->dbgfs_dir); -} - -void brcmf_debugfs_detach(struct brcmf_pub *drvr) -{ - if (!IS_ERR_OR_NULL(drvr->dbgfs_dir)) - debugfs_remove_recursive(drvr->dbgfs_dir); -} - -struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr) -{ - return drvr->dbgfs_dir; -} - -static -ssize_t brcmf_debugfs_sdio_counter_read(struct file *f, char __user *data, - size_t count, loff_t *ppos) -{ - struct brcmf_sdio_count *sdcnt = f->private_data; - char buf[750]; - int res; - - /* only allow read from start */ - if (*ppos > 0) - return 0; - - res = scnprintf(buf, sizeof(buf), - "intrcount: %u\nlastintrs: %u\n" - "pollcnt: %u\nregfails: %u\n" - "tx_sderrs: %u\nfcqueued: %u\n" - "rxrtx: %u\nrx_toolong: %u\n" - "rxc_errors: %u\nrx_hdrfail: %u\n" - "rx_badhdr: %u\nrx_badseq: %u\n" - "fc_rcvd: %u\nfc_xoff: %u\n" - "fc_xon: %u\nrxglomfail: %u\n" - "rxglomframes: %u\nrxglompkts: %u\n" - "f2rxhdrs: %u\nf2rxdata: %u\n" - "f2txdata: %u\nf1regdata: %u\n" - "tickcnt: %u\ntx_ctlerrs: %lu\n" - "tx_ctlpkts: %lu\nrx_ctlerrs: %lu\n" - "rx_ctlpkts: %lu\nrx_readahead: %lu\n", - sdcnt->intrcount, sdcnt->lastintrs, - sdcnt->pollcnt, sdcnt->regfails, - sdcnt->tx_sderrs, sdcnt->fcqueued, - sdcnt->rxrtx, sdcnt->rx_toolong, - sdcnt->rxc_errors, sdcnt->rx_hdrfail, - sdcnt->rx_badhdr, sdcnt->rx_badseq, - sdcnt->fc_rcvd, sdcnt->fc_xoff, - sdcnt->fc_xon, sdcnt->rxglomfail, - sdcnt->rxglomframes, sdcnt->rxglompkts, - sdcnt->f2rxhdrs, sdcnt->f2rxdata, - sdcnt->f2txdata, sdcnt->f1regdata, - sdcnt->tickcnt, sdcnt->tx_ctlerrs, - sdcnt->tx_ctlpkts, sdcnt->rx_ctlerrs, - sdcnt->rx_ctlpkts, sdcnt->rx_readahead_cnt); - - return simple_read_from_buffer(data, count, ppos, buf, res); -} - -static const struct file_operations brcmf_debugfs_sdio_counter_ops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = brcmf_debugfs_sdio_counter_read -}; - -void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr, - struct brcmf_sdio_count *sdcnt) -{ - struct dentry *dentry = drvr->dbgfs_dir; - - if (!IS_ERR_OR_NULL(dentry)) - debugfs_create_file("counters", S_IRUGO, dentry, - sdcnt, &brcmf_debugfs_sdio_counter_ops); -} diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h index b784920532d3..a2c4576cf9ff 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h @@ -76,63 +76,4 @@ do { \ extern int brcmf_msg_level; -/* - * hold counter variables used in brcmfmac sdio driver. 
- */ -struct brcmf_sdio_count { - uint intrcount; /* Count of device interrupt callbacks */ - uint lastintrs; /* Count as of last watchdog timer */ - uint pollcnt; /* Count of active polls */ - uint regfails; /* Count of R_REG failures */ - uint tx_sderrs; /* Count of tx attempts with sd errors */ - uint fcqueued; /* Tx packets that got queued */ - uint rxrtx; /* Count of rtx requests (NAK to dongle) */ - uint rx_toolong; /* Receive frames too long to receive */ - uint rxc_errors; /* SDIO errors when reading control frames */ - uint rx_hdrfail; /* SDIO errors on header reads */ - uint rx_badhdr; /* Bad received headers (roosync?) */ - uint rx_badseq; /* Mismatched rx sequence number */ - uint fc_rcvd; /* Number of flow-control events received */ - uint fc_xoff; /* Number which turned on flow-control */ - uint fc_xon; /* Number which turned off flow-control */ - uint rxglomfail; /* Failed deglom attempts */ - uint rxglomframes; /* Number of glom frames (superframes) */ - uint rxglompkts; /* Number of packets from glom frames */ - uint f2rxhdrs; /* Number of header reads */ - uint f2rxdata; /* Number of frame data reads */ - uint f2txdata; /* Number of f2 frame writes */ - uint f1regdata; /* Number of f1 register accesses */ - uint tickcnt; /* Number of watchdog been schedule */ - ulong tx_ctlerrs; /* Err of sending ctrl frames */ - ulong tx_ctlpkts; /* Ctrl frames sent to dongle */ - ulong rx_ctlerrs; /* Err of processing rx ctrl frames */ - ulong rx_ctlpkts; /* Ctrl frames processed from dongle */ - ulong rx_readahead_cnt; /* packets where header read-ahead was used */ -}; - -struct brcmf_pub; -#ifdef DEBUG -void brcmf_debugfs_init(void); -void brcmf_debugfs_exit(void); -int brcmf_debugfs_attach(struct brcmf_pub *drvr); -void brcmf_debugfs_detach(struct brcmf_pub *drvr); -struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr); -void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr, - struct brcmf_sdio_count *sdcnt); -#else -static inline void brcmf_debugfs_init(void) -{ -} -static inline void brcmf_debugfs_exit(void) -{ -} -static inline int brcmf_debugfs_attach(struct brcmf_pub *drvr) -{ - return 0; -} -static inline void brcmf_debugfs_detach(struct brcmf_pub *drvr) -{ -} -#endif - #endif /* _BRCMF_DBG_H_ */ diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index 01cf6c03390b..8933f9b31a9a 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c @@ -1007,9 +1007,6 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev) drvr->bus_if->drvr = drvr; drvr->dev = dev; - /* create device debugfs folder */ - brcmf_debugfs_attach(drvr); - /* Attach and link in the protocol */ ret = brcmf_proto_attach(drvr); if (ret != 0) { @@ -1126,7 +1123,6 @@ void brcmf_detach(struct device *dev) brcmf_proto_detach(drvr); } - brcmf_debugfs_detach(drvr); bus_if->drvr = NULL; kfree(drvr); } @@ -1196,8 +1192,6 @@ int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size) static void brcmf_driver_init(struct work_struct *work) { - brcmf_debugfs_init(); - #ifdef CONFIG_BRCMFMAC_SDIO brcmf_sdio_init(); #endif @@ -1225,7 +1219,6 @@ static void __exit brcmfmac_module_exit(void) #ifdef CONFIG_BRCMFMAC_USB brcmf_usb_exit(); #endif - brcmf_debugfs_exit(); } module_init(brcmfmac_module_init); diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 
b023766954a6..4deae28fc211 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -502,9 +502,12 @@ struct brcmf_sdio { bool intr; /* Use interrupts */ bool poll; /* Use polling */ bool ipend; /* Device interrupt is pending */ + uint intrcount; /* Count of device interrupt callbacks */ + uint lastintrs; /* Count as of last watchdog timer */ uint spurious; /* Count of spurious interrupts */ uint pollrate; /* Ticks between device polls */ uint polltick; /* Tick counter */ + uint pollcnt; /* Count of active polls */ #ifdef DEBUG uint console_interval; @@ -512,6 +515,8 @@ struct brcmf_sdio { uint console_addr; /* Console address from shared struct */ #endif /* DEBUG */ + uint regfails; /* Count of R_REG failures */ + uint clkstate; /* State of sd and backplane clock(s) */ bool activity; /* Activity flag for clock down */ s32 idletime; /* Control for activity timeout */ @@ -526,6 +531,33 @@ struct brcmf_sdio { /* Field to decide if rx of control frames happen in rxbuf or lb-pool */ bool usebufpool; + /* Some additional counters */ + uint tx_sderrs; /* Count of tx attempts with sd errors */ + uint fcqueued; /* Tx packets that got queued */ + uint rxrtx; /* Count of rtx requests (NAK to dongle) */ + uint rx_toolong; /* Receive frames too long to receive */ + uint rxc_errors; /* SDIO errors when reading control frames */ + uint rx_hdrfail; /* SDIO errors on header reads */ + uint rx_badhdr; /* Bad received headers (roosync?) */ + uint rx_badseq; /* Mismatched rx sequence number */ + uint fc_rcvd; /* Number of flow-control events received */ + uint fc_xoff; /* Number which turned on flow-control */ + uint fc_xon; /* Number which turned off flow-control */ + uint rxglomfail; /* Failed deglom attempts */ + uint rxglomframes; /* Number of glom frames (superframes) */ + uint rxglompkts; /* Number of packets from glom frames */ + uint f2rxhdrs; /* Number of header reads */ + uint f2rxdata; /* Number of frame data reads */ + uint f2txdata; /* Number of f2 frame writes */ + uint f1regdata; /* Number of f1 register accesses */ + uint tickcnt; /* Number of watchdog been schedule */ + unsigned long tx_ctlerrs; /* Err of sending ctrl frames */ + unsigned long tx_ctlpkts; /* Ctrl frames sent to dongle */ + unsigned long rx_ctlerrs; /* Err of processing rx ctrl frames */ + unsigned long rx_ctlpkts; /* Ctrl frames processed from dongle */ + unsigned long rx_readahead_cnt; /* Number of packets where header + * read-ahead was used. 
*/ + u8 *ctrl_frame_buf; u32 ctrl_frame_len; bool ctrl_frame_stat; @@ -551,7 +583,6 @@ struct brcmf_sdio { u32 fw_ptr; bool txoff; /* Transmit flow-controlled */ - struct brcmf_sdio_count sdcnt; }; /* clkstate */ @@ -914,7 +945,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus) if (ret == 0) w_sdreg32(bus, SMB_INT_ACK, offsetof(struct sdpcmd_regs, tosbmailbox)); - bus->sdcnt.f1regdata += 2; + bus->f1regdata += 2; /* Dongle recomposed rx frames, accept them again */ if (hmb_data & HMB_DATA_NAKHANDLED) { @@ -953,12 +984,12 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus) HMB_DATA_FCDATA_SHIFT; if (fcbits & ~bus->flowcontrol) - bus->sdcnt.fc_xoff++; + bus->fc_xoff++; if (bus->flowcontrol & ~fcbits) - bus->sdcnt.fc_xon++; + bus->fc_xon++; - bus->sdcnt.fc_rcvd++; + bus->fc_rcvd++; bus->flowcontrol = fcbits; } @@ -990,7 +1021,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx) brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, &err); - bus->sdcnt.f1regdata++; + bus->f1regdata++; /* Wait until the packet has been flushed (device/FIFO stable) */ for (lastrbc = retries = 0xffff; retries > 0; retries--) { @@ -998,7 +1029,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx) SBSDIO_FUNC1_RFRAMEBCHI, &err); lo = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_RFRAMEBCLO, &err); - bus->sdcnt.f1regdata += 2; + bus->f1regdata += 2; if ((hi == 0) && (lo == 0)) break; @@ -1016,11 +1047,11 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx) brcmf_dbg(INFO, "flush took %d iterations\n", 0xffff - retries); if (rtx) { - bus->sdcnt.rxrtx++; + bus->rxrtx++; err = w_sdreg32(bus, SMB_NAK, offsetof(struct sdpcmd_regs, tosbmailbox)); - bus->sdcnt.f1regdata++; + bus->f1regdata++; if (err == 0) bus->rxskip = true; } @@ -1212,7 +1243,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) dlen); errcode = -1; } - bus->sdcnt.f2rxdata++; + bus->f2rxdata++; /* On failure, kill the superframe, allow a couple retries */ if (errcode < 0) { @@ -1225,7 +1256,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) } else { bus->glomerr = 0; brcmf_sdbrcm_rxfail(bus, true, false); - bus->sdcnt.rxglomfail++; + bus->rxglomfail++; brcmf_sdbrcm_free_glom(bus); } return 0; @@ -1281,7 +1312,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) if (rxseq != seq) { brcmf_dbg(INFO, "(superframe) rx_seq %d, expected %d\n", seq, rxseq); - bus->sdcnt.rx_badseq++; + bus->rx_badseq++; rxseq = seq; } @@ -1345,7 +1376,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) } else { bus->glomerr = 0; brcmf_sdbrcm_rxfail(bus, true, false); - bus->sdcnt.rxglomfail++; + bus->rxglomfail++; brcmf_sdbrcm_free_glom(bus); } bus->nextlen = 0; @@ -1371,7 +1402,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) if (rxseq != seq) { brcmf_dbg(GLOM, "rx_seq %d, expected %d\n", seq, rxseq); - bus->sdcnt.rx_badseq++; + bus->rx_badseq++; rxseq = seq; } rxseq++; @@ -1410,8 +1441,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) down(&bus->sdsem); } - bus->sdcnt.rxglomframes++; - bus->sdcnt.rxglompkts += bus->glom.qlen; + bus->rxglomframes++; + bus->rxglompkts += bus->glom.qlen; } return num; } @@ -1495,7 +1526,7 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff) brcmf_dbg(ERROR, "%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n", len, len - doff, bus->sdiodev->bus_if->maxctl); 
bus->sdiodev->bus_if->dstats.rx_errors++; - bus->sdcnt.rx_toolong++; + bus->rx_toolong++; brcmf_sdbrcm_rxfail(bus, false, false); goto done; } @@ -1505,13 +1536,13 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff) bus->sdiodev->sbwad, SDIO_FUNC_2, F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen); - bus->sdcnt.f2rxdata++; + bus->f2rxdata++; /* Control frame failures need retransmission */ if (sdret < 0) { brcmf_dbg(ERROR, "read %d control bytes failed: %d\n", rdlen, sdret); - bus->sdcnt.rxc_errors++; + bus->rxc_errors++; brcmf_sdbrcm_rxfail(bus, true, true); goto done; } @@ -1558,7 +1589,7 @@ brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen, /* Read the entire frame */ sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad, SDIO_FUNC_2, F2SYNC, *pkt); - bus->sdcnt.f2rxdata++; + bus->f2rxdata++; if (sdret < 0) { brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n", @@ -1599,7 +1630,7 @@ brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf, if ((u16)~(*len ^ check)) { brcmf_dbg(ERROR, "(nextlen): HW hdr error: nextlen/len/check 0x%04x/0x%04x/0x%04x\n", nextlen, *len, check); - bus->sdcnt.rx_badhdr++; + bus->rx_badhdr++; brcmf_sdbrcm_rxfail(bus, false, false); goto fail; } @@ -1715,7 +1746,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) bus->nextlen = 0; } - bus->sdcnt.rx_readahead_cnt++; + bus->rx_readahead_cnt++; /* Handle Flow Control */ fcbits = SDPCM_FCMASK_VALUE( @@ -1723,12 +1754,12 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) if (bus->flowcontrol != fcbits) { if (~bus->flowcontrol & fcbits) - bus->sdcnt.fc_xoff++; + bus->fc_xoff++; if (bus->flowcontrol & ~fcbits) - bus->sdcnt.fc_xon++; + bus->fc_xon++; - bus->sdcnt.fc_rcvd++; + bus->fc_rcvd++; bus->flowcontrol = fcbits; } @@ -1736,7 +1767,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) if (rxseq != seq) { brcmf_dbg(INFO, "(nextlen): rx_seq %d, expected %d\n", seq, rxseq); - bus->sdcnt.rx_badseq++; + bus->rx_badseq++; rxseq = seq; } @@ -1783,11 +1814,11 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad, SDIO_FUNC_2, F2SYNC, bus->rxhdr, BRCMF_FIRSTREAD); - bus->sdcnt.f2rxhdrs++; + bus->f2rxhdrs++; if (sdret < 0) { brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n", sdret); - bus->sdcnt.rx_hdrfail++; + bus->rx_hdrfail++; brcmf_sdbrcm_rxfail(bus, true, true); continue; } @@ -1809,7 +1840,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) if ((u16) ~(len ^ check)) { brcmf_dbg(ERROR, "HW hdr err: len/check 0x%04x/0x%04x\n", len, check); - bus->sdcnt.rx_badhdr++; + bus->rx_badhdr++; brcmf_sdbrcm_rxfail(bus, false, false); continue; } @@ -1830,7 +1861,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) if ((doff < SDPCM_HDRLEN) || (doff > len)) { brcmf_dbg(ERROR, "Bad data offset %d: HW len %d, min %d seq %d\n", doff, len, SDPCM_HDRLEN, seq); - bus->sdcnt.rx_badhdr++; + bus->rx_badhdr++; brcmf_sdbrcm_rxfail(bus, false, false); continue; } @@ -1849,19 +1880,19 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) if (bus->flowcontrol != fcbits) { if (~bus->flowcontrol & fcbits) - bus->sdcnt.fc_xoff++; + bus->fc_xoff++; if (bus->flowcontrol & ~fcbits) - bus->sdcnt.fc_xon++; + bus->fc_xon++; - bus->sdcnt.fc_rcvd++; + bus->fc_rcvd++; bus->flowcontrol = fcbits; } /* Check and update 
sequence number */ if (rxseq != seq) { brcmf_dbg(INFO, "rx_seq %d, expected %d\n", seq, rxseq); - bus->sdcnt.rx_badseq++; + bus->rx_badseq++; rxseq = seq; } @@ -1906,7 +1937,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) brcmf_dbg(ERROR, "too long: len %d rdlen %d\n", len, rdlen); bus->sdiodev->bus_if->dstats.rx_errors++; - bus->sdcnt.rx_toolong++; + bus->rx_toolong++; brcmf_sdbrcm_rxfail(bus, false, false); continue; } @@ -1929,7 +1960,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) /* Read the remaining frame data */ sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad, SDIO_FUNC_2, F2SYNC, pkt); - bus->sdcnt.f2rxdata++; + bus->f2rxdata++; if (sdret < 0) { brcmf_dbg(ERROR, "read %d %s bytes failed: %d\n", rdlen, @@ -2116,18 +2147,18 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt, ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad, SDIO_FUNC_2, F2SYNC, pkt); - bus->sdcnt.f2txdata++; + bus->f2txdata++; if (ret < 0) { /* On failure, abort the command and terminate the frame */ brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n", ret); - bus->sdcnt.tx_sderrs++; + bus->tx_sderrs++; brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2); brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL); - bus->sdcnt.f1regdata++; + bus->f1regdata++; for (i = 0; i < 3; i++) { u8 hi, lo; @@ -2135,7 +2166,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt, SBSDIO_FUNC1_WFRAMEBCHI, NULL); lo = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL); - bus->sdcnt.f1regdata += 2; + bus->f1regdata += 2; if ((hi == 0) && (lo == 0)) break; } @@ -2193,7 +2224,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes) ret = r_sdreg32(bus, &intstatus, offsetof(struct sdpcmd_regs, intstatus)); - bus->sdcnt.f2txdata++; + bus->f2txdata++; if (ret != 0) break; if (intstatus & bus->hostintmask) @@ -2386,7 +2417,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) bus->ipend = false; err = r_sdreg32(bus, &newstatus, offsetof(struct sdpcmd_regs, intstatus)); - bus->sdcnt.f1regdata++; + bus->f1regdata++; if (err != 0) newstatus = 0; newstatus &= bus->hostintmask; @@ -2395,7 +2426,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) err = w_sdreg32(bus, newstatus, offsetof(struct sdpcmd_regs, intstatus)); - bus->sdcnt.f1regdata++; + bus->f1regdata++; } } @@ -2414,7 +2445,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) err = r_sdreg32(bus, &newstatus, offsetof(struct sdpcmd_regs, intstatus)); - bus->sdcnt.f1regdata += 2; + bus->f1regdata += 2; bus->fcstate = !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)); intstatus |= (newstatus & bus->hostintmask); @@ -2479,13 +2510,13 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) terminate the frame */ brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n", ret); - bus->sdcnt.tx_sderrs++; + bus->tx_sderrs++; brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2); brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, &err); - bus->sdcnt.f1regdata++; + bus->f1regdata++; for (i = 0; i < 3; i++) { u8 hi, lo; @@ -2495,7 +2526,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) lo = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, &err); - bus->sdcnt.f1regdata += 2; + bus->f1regdata += 2; if ((hi == 0) && (lo == 0)) break; } @@ -2626,7 +2657,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt) /* 
Check for existing queue, current flow-control, pending event, or pending clock */ brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq)); - bus->sdcnt.fcqueued++; + bus->fcqueued++; /* Priority based enq */ spin_lock_bh(&bus->txqlock); @@ -2814,13 +2845,13 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len) /* On failure, abort the command and terminate the frame */ brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n", ret); - bus->sdcnt.tx_sderrs++; + bus->tx_sderrs++; brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2); brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL); - bus->sdcnt.f1regdata++; + bus->f1regdata++; for (i = 0; i < 3; i++) { u8 hi, lo; @@ -2828,7 +2859,7 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len) SBSDIO_FUNC1_WFRAMEBCHI, NULL); lo = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL); - bus->sdcnt.f1regdata += 2; + bus->f1regdata += 2; if (hi == 0 && lo == 0) break; } @@ -2945,26 +2976,13 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen) up(&bus->sdsem); if (ret) - bus->sdcnt.tx_ctlerrs++; + bus->tx_ctlerrs++; else - bus->sdcnt.tx_ctlpkts++; + bus->tx_ctlpkts++; return ret ? -EIO : 0; } -#ifdef DEBUG -static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus) -{ - struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr; - - brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt); -} -#else -static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus) -{ -} -#endif /* DEBUG */ - static int brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen) { @@ -2999,9 +3017,9 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen) } if (rxlen) - bus->sdcnt.rx_ctlpkts++; + bus->rx_ctlpkts++; else - bus->sdcnt.rx_ctlerrs++; + bus->rx_ctlerrs++; return rxlen ? (int)rxlen : -ETIMEDOUT; } @@ -3401,7 +3419,7 @@ static int brcmf_sdbrcm_bus_init(struct device *dev) return 0; /* Start the watchdog timer */ - bus->sdcnt.tickcnt = 0; + bus->tickcnt = 0; brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS); down(&bus->sdsem); @@ -3494,7 +3512,7 @@ void brcmf_sdbrcm_isr(void *arg) return; } /* Count the interrupt call */ - bus->sdcnt.intrcount++; + bus->intrcount++; bus->ipend = true; /* Shouldn't get this interrupt if we're sleeping? 
*/ @@ -3536,8 +3554,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus) bus->polltick = 0; /* Check device if no interrupts */ - if (!bus->intr || - (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) { + if (!bus->intr || (bus->intrcount == bus->lastintrs)) { if (!bus->dpc_sched) { u8 devpend; @@ -3552,7 +3569,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus) /* If there is something, make like the ISR and schedule the DPC */ if (intstatus) { - bus->sdcnt.pollcnt++; + bus->pollcnt++; bus->ipend = true; bus->dpc_sched = true; @@ -3564,7 +3581,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus) } /* Update interrupt tracking */ - bus->sdcnt.lastintrs = bus->sdcnt.intrcount; + bus->lastintrs = bus->intrcount; } #ifdef DEBUG /* Poll for console output periodically */ @@ -3776,7 +3793,7 @@ brcmf_sdbrcm_watchdog_thread(void *data) if (!wait_for_completion_interruptible(&bus->watchdog_wait)) { brcmf_sdbrcm_bus_watchdog(bus); /* Count the tick for reference */ - bus->sdcnt.tickcnt++; + bus->tickcnt++; } else break; } @@ -3817,6 +3834,7 @@ static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus) static void brcmf_sdbrcm_release(struct brcmf_sdio *bus) { brcmf_dbg(TRACE, "Enter\n"); + if (bus) { /* De-register interrupt handler */ brcmf_sdio_intr_unregister(bus->sdiodev); @@ -3920,7 +3938,6 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev) goto fail; } - brcmf_sdio_debugfs_create(bus); brcmf_dbg(INFO, "completed!!\n"); /* if firmware path present try to download and bring up bus */ diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c index 01b190a25d94..95b5902bc4b3 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c @@ -735,8 +735,10 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi, * a candidate for aggregation */ p = pktq_ppeek(&qi->q, prec); + /* tx_info must be checked with current p */ + tx_info = IEEE80211_SKB_CB(p); + if (p) { - tx_info = IEEE80211_SKB_CB(p); if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && ((u8) (p->priority) == tid)) { plen = p->len + AMPDU_MAX_MPDU_OVERHEAD; @@ -757,7 +759,6 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi, p = NULL; continue; } - /* next packet fit for aggregation so dequeue */ p = brcmu_pktq_pdeq(&qi->q, prec); } else { p = NULL; diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index 341e06a0d6ec..50f92a0b7c41 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -721,6 +721,14 @@ static const struct ieee80211_ops brcms_ops = { .flush = brcms_ops_flush, }; +/* + * is called in brcms_bcma_probe() context, therefore no locking required. 
+ */ +static int brcms_set_hint(struct brcms_info *wl, char *abbrev) +{ + return regulatory_hint(wl->pub->ieee_hw->wiphy, abbrev); +} + void brcms_dpc(unsigned long data) { struct brcms_info *wl; @@ -1060,9 +1068,9 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev) wiphy_err(wl->wiphy, "%s: ieee80211_register_hw failed, status" "%d\n", __func__, err); - if (wl->pub->srom_ccode[0] && - regulatory_hint(wl->wiphy, wl->pub->srom_ccode)) - wiphy_err(wl->wiphy, "%s: regulatory hint failed\n", __func__); + if (wl->pub->srom_ccode[0] && brcms_set_hint(wl, wl->pub->srom_ccode)) + wiphy_err(wl->wiphy, "%s: regulatory_hint failed, status %d\n", + __func__, err); n_adapters_found++; return wl; diff --git a/trunk/drivers/net/wireless/iwlwifi/Makefile b/trunk/drivers/net/wireless/iwlwifi/Makefile index 170ec330d2a9..98c8f6449649 100644 --- a/trunk/drivers/net/wireless/iwlwifi/Makefile +++ b/trunk/drivers/net/wireless/iwlwifi/Makefile @@ -1,3 +1,7 @@ +obj-$(CONFIG_IWLDVM) += dvm/ + +CFLAGS_iwl-devtrace.o := -I$(src) + # common obj-$(CONFIG_IWLWIFI) += iwlwifi.o iwlwifi-objs += iwl-io.o @@ -9,11 +13,5 @@ iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o iwlwifi-objs += pcie/1000.o pcie/2000.o pcie/5000.o pcie/6000.o iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o -iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o ccflags-y += -D__CHECK_ENDIAN__ -I$(src) - - -obj-$(CONFIG_IWLDVM) += dvm/ - -CFLAGS_iwl-devtrace.o := -I$(src) diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/agn.h b/trunk/drivers/net/wireless/iwlwifi/dvm/agn.h index 6d102413dd94..2ae3608472a6 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/agn.h +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/agn.h @@ -395,10 +395,8 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags) } extern int iwl_alive_start(struct iwl_priv *priv); - -/* testmode support */ +/* svtool */ #ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE - extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len); extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, @@ -406,16 +404,13 @@ extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct netlink_callback *cb, void *data, int len); extern void iwl_testmode_init(struct iwl_priv *priv); -extern void iwl_testmode_free(struct iwl_priv *priv); - +extern void iwl_testmode_cleanup(struct iwl_priv *priv); #else - static inline int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) { return -ENOSYS; } - static inline int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb, struct netlink_callback *cb, @@ -423,12 +418,12 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb, { return -ENOSYS; } - -static inline void iwl_testmode_init(struct iwl_priv *priv) +static inline +void iwl_testmode_init(struct iwl_priv *priv) { } - -static inline void iwl_testmode_free(struct iwl_priv *priv) +static inline +void iwl_testmode_cleanup(struct iwl_priv *priv) { } #endif diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/dev.h b/trunk/drivers/net/wireless/iwlwifi/dvm/dev.h index 4620b657948a..89f2e1040e7f 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/dev.h +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/dev.h @@ -52,8 +52,6 @@ #include "rs.h" #include "tt.h" -#include "iwl-test.h" - /* CT-KILL constants */ #define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ #define CT_KILL_THRESHOLD 114 /* in Celsius */ @@ -598,6 +596,24 @@ struct iwl_lib_ops { void (*temperature)(struct iwl_priv *priv); }; +#ifdef 
CONFIG_IWLWIFI_DEVICE_TESTMODE +struct iwl_testmode_trace { + u32 buff_size; + u32 total_size; + u32 num_chunks; + u8 *cpu_addr; + u8 *trace_addr; + dma_addr_t dma_addr; + bool trace_enabled; +}; +struct iwl_testmode_mem { + u32 buff_size; + u32 num_chunks; + u8 *buff_addr; + bool read_in_progress; +}; +#endif + struct iwl_wipan_noa_data { struct rcu_head rcu_head; u32 length; @@ -654,6 +670,8 @@ struct iwl_priv { enum ieee80211_band band; u8 valid_contexts; + void (*pre_rx_handler)(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb); int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd); @@ -877,9 +895,9 @@ struct iwl_priv { struct led_classdev led; unsigned long blink_on, blink_off; bool led_registered; - #ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE - struct iwl_test tst; + struct iwl_testmode_trace testmode_trace; + struct iwl_testmode_mem testmode_mem; u32 tm_fixed_rate; #endif diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/lib.c b/trunk/drivers/net/wireless/iwlwifi/dvm/lib.c index 76f259283c3a..cb1ca7a25dd5 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/lib.c +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/lib.c @@ -1265,7 +1265,7 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) * the mutex, this ensures we don't try to send two * (or more) synchronous commands at a time. */ - if (!(cmd->flags & CMD_ASYNC)) + if (cmd->flags & CMD_SYNC) lockdep_assert_held(&priv->mutex); if (priv->ucode_owner == IWL_OWNERSHIP_TM && diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/trunk/drivers/net/wireless/iwlwifi/dvm/mac80211.c index 9d2374862314..599e8b41f5a8 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -476,7 +476,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw) } if (priv->wowlan_sram) - _iwl_read_targ_mem_dwords( + _iwl_read_targ_mem_words( priv->trans, 0x800000, priv->wowlan_sram, img->sec[IWL_UCODE_SECTION_DATA].len / 4); diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/main.c b/trunk/drivers/net/wireless/iwlwifi/dvm/main.c index e620af3d592d..1c2d0233a405 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/main.c +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/main.c @@ -406,7 +406,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv) base = priv->device_pointers.log_event_table; if (iwlagn_hw_valid_rtc_data_addr(base)) { - iwl_read_targ_mem_bytes(priv->trans, base, &read, sizeof(read)); + iwl_read_targ_mem_words(priv->trans, base, &read, sizeof(read)); capacity = read.capacity; mode = read.mode; num_wraps = read.wrap_counter; @@ -1548,7 +1548,7 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode) iwl_dbgfs_unregister(priv); - iwl_testmode_free(priv); + iwl_testmode_cleanup(priv); iwlagn_mac_unregister(priv); iwl_tt_exit(priv); @@ -1671,7 +1671,7 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv) } /*TODO: Update dbgfs with ISR error stats obtained below */ - iwl_read_targ_mem_bytes(trans, base, &table, sizeof(table)); + iwl_read_targ_mem_words(trans, base, &table, sizeof(table)); if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { IWL_ERR(trans, "Start IWL Error Log Dump:\n"); diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/rx.c b/trunk/drivers/net/wireless/iwlwifi/dvm/rx.c index c1f7a18e08dd..0ed90bb8b56a 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/rx.c +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/rx.c @@ -1124,6 +1124,8 @@ int iwl_rx_dispatch(struct 
iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb, { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); + void (*pre_rx_handler)(struct iwl_priv *, + struct iwl_rx_cmd_buffer *); int err = 0; /* @@ -1133,19 +1135,19 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb, */ iwl_notification_wait_notify(&priv->notif_wait, pkt); -#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE - /* - * RX data may be forwarded to userspace in one - * of two cases: the user owns the fw through testmode or when - * the user requested to monitor the rx w/o affecting the regular flow. - * In these cases the iwl_test object will handle forwarding the rx - * data to user space. - * Note that if the ownership flag != IWL_OWNERSHIP_TM the flow + /* RX data may be forwarded to userspace (using pre_rx_handler) in one + * of two cases: the first, that the user owns the uCode through + * testmode - in such case the pre_rx_handler is set and no further + * processing takes place. The other case is when the user want to + * monitor the rx w/o affecting the regular flow - the pre_rx_handler + * will be set but the ownership flag != IWL_OWNERSHIP_TM and the flow * continues. + * We need to use ACCESS_ONCE to prevent a case where the handler + * changes between the check and the call. */ - iwl_test_rx(&priv->tst, rxb); -#endif - + pre_rx_handler = ACCESS_ONCE(priv->pre_rx_handler); + if (pre_rx_handler) + pre_rx_handler(priv, rxb); if (priv->ucode_owner != IWL_OWNERSHIP_TM) { /* Based on type of command response or notification, * handle those that need handling via function in diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/testmode.c b/trunk/drivers/net/wireless/iwlwifi/dvm/testmode.c index 57b918ce3b5f..e08b1a383594 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/testmode.c +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/testmode.c @@ -60,7 +60,6 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ - #include #include #include @@ -70,84 +69,355 @@ #include #include #include - #include "iwl-debug.h" +#include "iwl-io.h" #include "iwl-trans.h" +#include "iwl-fh.h" +#include "iwl-prph.h" #include "dev.h" #include "agn.h" -#include "iwl-test.h" -#include "iwl-testmode.h" +#include "testmode.h" + + +/* Periphery registers absolute lower bound. This is used in order to + * differentiate registery access through HBUS_TARG_PRPH_* and + * HBUS_TARG_MEM_* accesses. + */ +#define IWL_TM_ABS_PRPH_START (0xA00000) + +/* The TLVs used in the gnl message policy between the kernel module and + * user space application. iwl_testmode_gnl_msg_policy is to be carried + * through the NL80211_CMD_TESTMODE channel regulated by nl80211. 
+ * See testmode.h + */ +static +struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = { + [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, }, + + [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, }, + [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, }, + + [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, }, + [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, }, + [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, }, + + [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, }, + [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, }, -static int iwl_testmode_send_cmd(struct iwl_op_mode *op_mode, - struct iwl_host_cmd *cmd) + [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, }, + + [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, }, + [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, }, + [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, }, + + [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, }, + + [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, }, + + [IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, }, + [IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, }, + [IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, }, + + [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, }, + [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, }, + [IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, }, + [IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, }, + [IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, }, + + [IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, }, +}; + +/* + * See the struct iwl_rx_packet in commands.h for the format of the + * received events from the device + */ +static inline int get_event_length(struct iwl_rx_cmd_buffer *rxb) { - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - return iwl_dvm_send_cmd(priv, cmd); + struct iwl_rx_packet *pkt = rxb_addr(rxb); + if (pkt) + return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + else + return 0; } -static bool iwl_testmode_valid_hw_addr(u32 addr) + +/* + * This function multicasts the spontaneous messages from the device to the + * user space. It is invoked whenever there is a received messages + * from the device. This function is called within the ISR of the rx handlers + * in iwlagn driver. + * + * The parsing of the message content is left to the user space application, + * The message content is treated as unattacked raw data and is encapsulated + * with IWL_TM_ATTR_UCODE_RX_PKT multicasting to the user space. + * + * @priv: the instance of iwlwifi device + * @rxb: pointer to rx data content received by the ISR + * + * See the message policies and TLVs in iwl_testmode_gnl_msg_policy[]. 
+ * For the messages multicasting to the user application, the mandatory + * TLV fields are : + * IWL_TM_ATTR_COMMAND must be IWL_TM_CMD_DEV2APP_UCODE_RX_PKT + * IWL_TM_ATTR_UCODE_RX_PKT for carrying the message content + */ + +static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { - if (iwlagn_hw_valid_rtc_data_addr(addr)) - return true; + struct ieee80211_hw *hw = priv->hw; + struct sk_buff *skb; + void *data; + int length; - if (IWLAGN_RTC_INST_LOWER_BOUND <= addr && - addr < IWLAGN_RTC_INST_UPPER_BOUND) - return true; + data = rxb_addr(rxb); + length = get_event_length(rxb); - return false; -} + if (!data || length == 0) + return; -static u32 iwl_testmode_get_fw_ver(struct iwl_op_mode *op_mode) -{ - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - return priv->fw->ucode_ver; + skb = cfg80211_testmode_alloc_event_skb(hw->wiphy, 20 + length, + GFP_ATOMIC); + if (skb == NULL) { + IWL_ERR(priv, + "Run out of memory for messages to user space ?\n"); + return; + } + if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) || + /* the length doesn't include len_n_flags field, so add it manually */ + nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data)) + goto nla_put_failure; + cfg80211_testmode_event(skb, GFP_ATOMIC); + return; + +nla_put_failure: + kfree_skb(skb); + IWL_ERR(priv, "Ouch, overran buffer, check allocation!\n"); } -static struct sk_buff* -iwl_testmode_alloc_reply(struct iwl_op_mode *op_mode, int len) +void iwl_testmode_init(struct iwl_priv *priv) { - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - return cfg80211_testmode_alloc_reply_skb(priv->hw->wiphy, len); + priv->pre_rx_handler = NULL; + priv->testmode_trace.trace_enabled = false; + priv->testmode_mem.read_in_progress = false; } -static int iwl_testmode_reply(struct iwl_op_mode *op_mode, struct sk_buff *skb) +static void iwl_mem_cleanup(struct iwl_priv *priv) { - return cfg80211_testmode_reply(skb); + if (priv->testmode_mem.read_in_progress) { + kfree(priv->testmode_mem.buff_addr); + priv->testmode_mem.buff_addr = NULL; + priv->testmode_mem.buff_size = 0; + priv->testmode_mem.num_chunks = 0; + priv->testmode_mem.read_in_progress = false; + } } -static struct sk_buff *iwl_testmode_alloc_event(struct iwl_op_mode *op_mode, - int len) +static void iwl_trace_cleanup(struct iwl_priv *priv) { - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - return cfg80211_testmode_alloc_event_skb(priv->hw->wiphy, len, - GFP_ATOMIC); + if (priv->testmode_trace.trace_enabled) { + if (priv->testmode_trace.cpu_addr && + priv->testmode_trace.dma_addr) + dma_free_coherent(priv->trans->dev, + priv->testmode_trace.total_size, + priv->testmode_trace.cpu_addr, + priv->testmode_trace.dma_addr); + priv->testmode_trace.trace_enabled = false; + priv->testmode_trace.cpu_addr = NULL; + priv->testmode_trace.trace_addr = NULL; + priv->testmode_trace.dma_addr = 0; + priv->testmode_trace.buff_size = 0; + priv->testmode_trace.total_size = 0; + } } -static void iwl_testmode_event(struct iwl_op_mode *op_mode, struct sk_buff *skb) + +void iwl_testmode_cleanup(struct iwl_priv *priv) { - return cfg80211_testmode_event(skb, GFP_ATOMIC); + iwl_trace_cleanup(priv); + iwl_mem_cleanup(priv); } -static struct iwl_test_ops tst_ops = { - .send_cmd = iwl_testmode_send_cmd, - .valid_hw_addr = iwl_testmode_valid_hw_addr, - .get_fw_ver = iwl_testmode_get_fw_ver, - .alloc_reply = iwl_testmode_alloc_reply, - .reply = iwl_testmode_reply, - .alloc_event = iwl_testmode_alloc_event, - 
.event = iwl_testmode_event, -}; -void iwl_testmode_init(struct iwl_priv *priv) +/* + * This function handles the user application commands to the ucode. + * + * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_CMD_ID and + * IWL_TM_ATTR_UCODE_CMD_DATA and calls to the handler to send the + * host command to the ucode. + * + * If any mandatory field is missing, -ENOMSG is replied to the user space + * application; otherwise, waits for the host command to be sent and checks + * the return code. In case or error, it is returned, otherwise a reply is + * allocated and the reply RX packet + * is returned. + * + * @hw: ieee80211_hw object that represents the device + * @tb: gnl message fields from the user space + */ +static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb) { - iwl_test_init(&priv->tst, priv->trans, &tst_ops); + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + struct iwl_host_cmd cmd; + struct iwl_rx_packet *pkt; + struct sk_buff *skb; + void *reply_buf; + u32 reply_len; + int ret; + bool cmd_want_skb; + + memset(&cmd, 0, sizeof(struct iwl_host_cmd)); + + if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] || + !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) { + IWL_ERR(priv, "Missing ucode command mandatory fields\n"); + return -ENOMSG; + } + + cmd.flags = CMD_ON_DEMAND | CMD_SYNC; + cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]); + if (cmd_want_skb) + cmd.flags |= CMD_WANT_SKB; + + cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]); + cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); + cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); + cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; + IWL_DEBUG_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x," + " len %d\n", cmd.id, cmd.flags, cmd.len[0]); + + ret = iwl_dvm_send_cmd(priv, &cmd); + if (ret) { + IWL_ERR(priv, "Failed to send hcmd\n"); + return ret; + } + if (!cmd_want_skb) + return ret; + + /* Handling return of SKB to the user */ + pkt = cmd.resp_pkt; + if (!pkt) { + IWL_ERR(priv, "HCMD received a null response packet\n"); + return ret; + } + + reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, reply_len + 20); + reply_buf = kmalloc(reply_len, GFP_KERNEL); + if (!skb || !reply_buf) { + kfree_skb(skb); + kfree(reply_buf); + return -ENOMEM; + } + + /* The reply is in a page, that we cannot send to user space. */ + memcpy(reply_buf, &(pkt->hdr), reply_len); + iwl_free_resp(&cmd); + + if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) || + nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf)) + goto nla_put_failure; + return cfg80211_testmode_reply(skb); + +nla_put_failure: + IWL_DEBUG_INFO(priv, "Failed creating NL attributes\n"); + return -ENOMSG; } -void iwl_testmode_free(struct iwl_priv *priv) + +/* + * This function handles the user application commands for register access. + * + * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the + * handlers respectively. + * + * If it's an unknown commdn ID, -ENOSYS is returned; or -ENOMSG if the + * mandatory fields(IWL_TM_ATTR_REG_OFFSET,IWL_TM_ATTR_REG_VALUE32, + * IWL_TM_ATTR_REG_VALUE8) are missing; Otherwise 0 is replied indicating + * the success of the command execution. + * + * If IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_READ32, the register read + * value is returned with IWL_TM_ATTR_REG_VALUE32. 
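The read case replies through the same small-skb pattern that the other "get" commands below (FW version, device id) use. Condensed into a hypothetical helper, and assuming only the cfg80211 testmode calls already used in this file, the pattern is:

static int iwl_tm_reply_u32(struct ieee80211_hw *hw, int attr, u32 val)
{
	struct sk_buff *skb;

	/* ~20 bytes of headroom is the convention used throughout this file */
	skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
	if (!skb)
		return -ENOMEM;
	if (nla_put_u32(skb, attr, val)) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}
	return cfg80211_testmode_reply(skb);
}
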
+ * + * @hw: ieee80211_hw object that represents the device + * @tb: gnl message fields from the user space + */ +static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb) { - iwl_test_free(&priv->tst); + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + u32 ofs, val32, cmd; + u8 val8; + struct sk_buff *skb; + int status = 0; + + if (!tb[IWL_TM_ATTR_REG_OFFSET]) { + IWL_ERR(priv, "Missing register offset\n"); + return -ENOMSG; + } + ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]); + IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs); + + /* Allow access only to FH/CSR/HBUS in direct mode. + Since we don't have the upper bounds for the CSR and HBUS segments, + we will use only the upper bound of FH for sanity check. */ + cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]); + if ((cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 || + cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 || + cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8) && + (ofs >= FH_MEM_UPPER_BOUND)) { + IWL_ERR(priv, "offset out of segment (0x0 - 0x%x)\n", + FH_MEM_UPPER_BOUND); + return -EINVAL; + } + + switch (cmd) { + case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32: + val32 = iwl_read_direct32(priv->trans, ofs); + IWL_INFO(priv, "32bit value to read 0x%x\n", val32); + + skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); + if (!skb) { + IWL_ERR(priv, "Memory allocation fail\n"); + return -ENOMEM; + } + if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32)) + goto nla_put_failure; + status = cfg80211_testmode_reply(skb); + if (status < 0) + IWL_ERR(priv, "Error sending msg : %d\n", status); + break; + case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32: + if (!tb[IWL_TM_ATTR_REG_VALUE32]) { + IWL_ERR(priv, "Missing value to write\n"); + return -ENOMSG; + } else { + val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]); + IWL_INFO(priv, "32bit value to write 0x%x\n", val32); + iwl_write_direct32(priv->trans, ofs, val32); + } + break; + case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8: + if (!tb[IWL_TM_ATTR_REG_VALUE8]) { + IWL_ERR(priv, "Missing value to write\n"); + return -ENOMSG; + } else { + val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]); + IWL_INFO(priv, "8bit value to write 0x%x\n", val8); + iwl_write8(priv->trans, ofs, val8); + } + break; + default: + IWL_ERR(priv, "Unknown testmode register command ID\n"); + return -ENOSYS; + } + + return status; + +nla_put_failure: + kfree_skb(skb); + return -EMSGSIZE; } + static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv) { struct iwl_notification_wait calib_wait; @@ -199,7 +469,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) struct sk_buff *skb; unsigned char *rsp_data_ptr = NULL; int status = 0, rsp_data_len = 0; - u32 inst_size = 0, data_size = 0; + u32 devid, inst_size = 0, data_size = 0; const struct fw_img *img; switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { @@ -293,6 +563,39 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]); break; + case IWL_TM_CMD_APP2DEV_GET_FW_VERSION: + IWL_INFO(priv, "uCode version raw: 0x%x\n", + priv->fw->ucode_ver); + + skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); + if (!skb) { + IWL_ERR(priv, "Memory allocation fail\n"); + return -ENOMEM; + } + if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, + priv->fw->ucode_ver)) + goto nla_put_failure; + status = cfg80211_testmode_reply(skb); + if (status < 0) + IWL_ERR(priv, "Error sending msg : %d\n", status); + break; + + case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: + devid = 
priv->trans->hw_id; + IWL_INFO(priv, "hw version: 0x%x\n", devid); + + skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); + if (!skb) { + IWL_ERR(priv, "Memory allocation fail\n"); + return -ENOMEM; + } + if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid)) + goto nla_put_failure; + status = cfg80211_testmode_reply(skb); + if (status < 0) + IWL_ERR(priv, "Error sending msg : %d\n", status); + break; + case IWL_TM_CMD_APP2DEV_GET_FW_INFO: skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20 + 8); if (!skb) { @@ -327,6 +630,125 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) return -EMSGSIZE; } + +/* + * This function handles the user application commands for uCode trace + * + * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the + * handlers respectively. + * + * If it's an unknown commdn ID, -ENOSYS is replied; otherwise, the returned + * value of the actual command execution is replied to the user application. + * + * @hw: ieee80211_hw object that represents the device + * @tb: gnl message fields from the user space + */ +static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + struct sk_buff *skb; + int status = 0; + struct device *dev = priv->trans->dev; + + switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { + case IWL_TM_CMD_APP2DEV_BEGIN_TRACE: + if (priv->testmode_trace.trace_enabled) + return -EBUSY; + + if (!tb[IWL_TM_ATTR_TRACE_SIZE]) + priv->testmode_trace.buff_size = TRACE_BUFF_SIZE_DEF; + else + priv->testmode_trace.buff_size = + nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]); + if (!priv->testmode_trace.buff_size) + return -EINVAL; + if (priv->testmode_trace.buff_size < TRACE_BUFF_SIZE_MIN || + priv->testmode_trace.buff_size > TRACE_BUFF_SIZE_MAX) + return -EINVAL; + + priv->testmode_trace.total_size = + priv->testmode_trace.buff_size + TRACE_BUFF_PADD; + priv->testmode_trace.cpu_addr = + dma_alloc_coherent(dev, + priv->testmode_trace.total_size, + &priv->testmode_trace.dma_addr, + GFP_KERNEL); + if (!priv->testmode_trace.cpu_addr) + return -ENOMEM; + priv->testmode_trace.trace_enabled = true; + priv->testmode_trace.trace_addr = (u8 *)PTR_ALIGN( + priv->testmode_trace.cpu_addr, 0x100); + memset(priv->testmode_trace.trace_addr, 0x03B, + priv->testmode_trace.buff_size); + skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, + sizeof(priv->testmode_trace.dma_addr) + 20); + if (!skb) { + IWL_ERR(priv, "Memory allocation fail\n"); + iwl_trace_cleanup(priv); + return -ENOMEM; + } + if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR, + sizeof(priv->testmode_trace.dma_addr), + (u64 *)&priv->testmode_trace.dma_addr)) + goto nla_put_failure; + status = cfg80211_testmode_reply(skb); + if (status < 0) { + IWL_ERR(priv, "Error sending msg : %d\n", status); + } + priv->testmode_trace.num_chunks = + DIV_ROUND_UP(priv->testmode_trace.buff_size, + DUMP_CHUNK_SIZE); + break; + + case IWL_TM_CMD_APP2DEV_END_TRACE: + iwl_trace_cleanup(priv); + break; + default: + IWL_ERR(priv, "Unknown testmode mem command ID\n"); + return -ENOSYS; + } + return status; + +nla_put_failure: + kfree_skb(skb); + if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) == + IWL_TM_CMD_APP2DEV_BEGIN_TRACE) + iwl_trace_cleanup(priv); + return -EMSGSIZE; +} + +static int iwl_testmode_trace_dump(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + int idx, length; + + if (priv->testmode_trace.trace_enabled && + priv->testmode_trace.trace_addr) { + 
idx = cb->args[4]; + if (idx >= priv->testmode_trace.num_chunks) + return -ENOENT; + length = DUMP_CHUNK_SIZE; + if (((idx + 1) == priv->testmode_trace.num_chunks) && + (priv->testmode_trace.buff_size % DUMP_CHUNK_SIZE)) + length = priv->testmode_trace.buff_size % + DUMP_CHUNK_SIZE; + + if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length, + priv->testmode_trace.trace_addr + + (DUMP_CHUNK_SIZE * idx))) + goto nla_put_failure; + idx++; + cb->args[4] = idx; + return 0; + } else + return -EFAULT; + + nla_put_failure: + return -ENOBUFS; +} + /* * This function handles the user application switch ucode ownership. * @@ -355,10 +777,10 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb) owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]); if (owner == IWL_OWNERSHIP_DRIVER) { priv->ucode_owner = owner; - iwl_test_enable_notifications(&priv->tst, false); + priv->pre_rx_handler = NULL; } else if (owner == IWL_OWNERSHIP_TM) { + priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt; priv->ucode_owner = owner; - iwl_test_enable_notifications(&priv->tst, true); } else { IWL_ERR(priv, "Invalid owner\n"); return -EINVAL; @@ -366,6 +788,180 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb) return 0; } +static int iwl_testmode_indirect_read(struct iwl_priv *priv, u32 addr, u32 size) +{ + struct iwl_trans *trans = priv->trans; + unsigned long flags; + int i; + + if (size & 0x3) + return -EINVAL; + priv->testmode_mem.buff_size = size; + priv->testmode_mem.buff_addr = + kmalloc(priv->testmode_mem.buff_size, GFP_KERNEL); + if (priv->testmode_mem.buff_addr == NULL) + return -ENOMEM; + + /* Hard-coded periphery absolute address */ + if (IWL_TM_ABS_PRPH_START <= addr && + addr < IWL_TM_ABS_PRPH_START + PRPH_END) { + spin_lock_irqsave(&trans->reg_lock, flags); + iwl_grab_nic_access(trans); + iwl_write32(trans, HBUS_TARG_PRPH_RADDR, + addr | (3 << 24)); + for (i = 0; i < size; i += 4) + *(u32 *)(priv->testmode_mem.buff_addr + i) = + iwl_read32(trans, HBUS_TARG_PRPH_RDAT); + iwl_release_nic_access(trans); + spin_unlock_irqrestore(&trans->reg_lock, flags); + } else { /* target memory (SRAM) */ + _iwl_read_targ_mem_words(trans, addr, + priv->testmode_mem.buff_addr, + priv->testmode_mem.buff_size / 4); + } + + priv->testmode_mem.num_chunks = + DIV_ROUND_UP(priv->testmode_mem.buff_size, DUMP_CHUNK_SIZE); + priv->testmode_mem.read_in_progress = true; + return 0; + +} + +static int iwl_testmode_indirect_write(struct iwl_priv *priv, u32 addr, + u32 size, unsigned char *buf) +{ + struct iwl_trans *trans = priv->trans; + u32 val, i; + unsigned long flags; + + if (IWL_TM_ABS_PRPH_START <= addr && + addr < IWL_TM_ABS_PRPH_START + PRPH_END) { + /* Periphery writes can be 1-3 bytes long, or DWORDs */ + if (size < 4) { + memcpy(&val, buf, size); + spin_lock_irqsave(&trans->reg_lock, flags); + iwl_grab_nic_access(trans); + iwl_write32(trans, HBUS_TARG_PRPH_WADDR, + (addr & 0x0000FFFF) | + ((size - 1) << 24)); + iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val); + iwl_release_nic_access(trans); + /* needed after consecutive writes w/o read */ + mmiowb(); + spin_unlock_irqrestore(&trans->reg_lock, flags); + } else { + if (size % 4) + return -EINVAL; + for (i = 0; i < size; i += 4) + iwl_write_prph(trans, addr+i, + *(u32 *)(buf+i)); + } + } else if (iwlagn_hw_valid_rtc_data_addr(addr) || + (IWLAGN_RTC_INST_LOWER_BOUND <= addr && + addr < IWLAGN_RTC_INST_UPPER_BOUND)) { + _iwl_write_targ_mem_words(trans, addr, buf, size/4); + } else + return -EINVAL; + return 0; +} + +/* + * This function 
handles the user application commands for SRAM data dump + * + * It retrieves the mandatory fields IWL_TM_ATTR_SRAM_ADDR and + * IWL_TM_ATTR_SRAM_SIZE to decide the memory area for SRAM data reading + * + * Several error will be retured, -EBUSY if the SRAM data retrieved by + * previous command has not been delivered to userspace, or -ENOMSG if + * the mandatory fields (IWL_TM_ATTR_SRAM_ADDR,IWL_TM_ATTR_SRAM_SIZE) + * are missing, or -ENOMEM if the buffer allocation fails. + * + * Otherwise 0 is replied indicating the success of the SRAM reading. + * + * @hw: ieee80211_hw object that represents the device + * @tb: gnl message fields from the user space + */ +static int iwl_testmode_indirect_mem(struct ieee80211_hw *hw, + struct nlattr **tb) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + u32 addr, size, cmd; + unsigned char *buf; + + /* Both read and write should be blocked, for atomicity */ + if (priv->testmode_mem.read_in_progress) + return -EBUSY; + + cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]); + if (!tb[IWL_TM_ATTR_MEM_ADDR]) { + IWL_ERR(priv, "Error finding memory offset address\n"); + return -ENOMSG; + } + addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]); + if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) { + IWL_ERR(priv, "Error finding size for memory reading\n"); + return -ENOMSG; + } + size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]); + + if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ) + return iwl_testmode_indirect_read(priv, addr, size); + else { + if (!tb[IWL_TM_ATTR_BUFFER_DUMP]) + return -EINVAL; + buf = (unsigned char *) nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]); + return iwl_testmode_indirect_write(priv, addr, size, buf); + } +} + +static int iwl_testmode_buffer_dump(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + int idx, length; + + if (priv->testmode_mem.read_in_progress) { + idx = cb->args[4]; + if (idx >= priv->testmode_mem.num_chunks) { + iwl_mem_cleanup(priv); + return -ENOENT; + } + length = DUMP_CHUNK_SIZE; + if (((idx + 1) == priv->testmode_mem.num_chunks) && + (priv->testmode_mem.buff_size % DUMP_CHUNK_SIZE)) + length = priv->testmode_mem.buff_size % + DUMP_CHUNK_SIZE; + + if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length, + priv->testmode_mem.buff_addr + + (DUMP_CHUNK_SIZE * idx))) + goto nla_put_failure; + idx++; + cb->args[4] = idx; + return 0; + } else + return -EFAULT; + + nla_put_failure: + return -ENOBUFS; +} + +static int iwl_testmode_notifications(struct ieee80211_hw *hw, + struct nlattr **tb) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + bool enable; + + enable = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]); + if (enable) + priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt; + else + priv->pre_rx_handler = NULL; + return 0; +} + + /* The testmode gnl message handler that takes the gnl message from the * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then * invoke the corresponding handlers. 
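On the user-space side these messages travel nested inside NL80211_ATTR_TESTDATA of an NL80211_CMD_TESTMODE request. A rough libnl-3 sketch of issuing one command is shown below; the IWL_TM_* values are placeholders that must be taken from testmode.h, and error handling and reply parsing are omitted:

#include <net/if.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

#define IWL_TM_ATTR_COMMAND			1	/* placeholder, see testmode.h */
#define IWL_TM_CMD_APP2DEV_GET_FW_VERSION	0xff	/* placeholder, see testmode.h */

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *td;
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl80211");

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_TESTMODE, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex("wlan0"));

	/* driver-specific TLVs are nested inside NL80211_ATTR_TESTDATA */
	td = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
	nla_put_u32(msg, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_APP2DEV_GET_FW_VERSION);
	nla_nest_end(msg, td);

	nl_send_auto(sk, msg);		/* reply arrives as a testmode reply */

	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}
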
@@ -391,27 +987,32 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); int result; - result = iwl_test_parse(&priv->tst, tb, data, len); - if (result) + result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len, + iwl_testmode_gnl_msg_policy); + if (result != 0) { + IWL_ERR(priv, "Error parsing the gnl message : %d\n", result); return result; + } + /* IWL_TM_ATTR_COMMAND is absolutely mandatory */ + if (!tb[IWL_TM_ATTR_COMMAND]) { + IWL_ERR(priv, "Missing testmode command type\n"); + return -ENOMSG; + } /* in case multiple accesses to the device happens */ mutex_lock(&priv->mutex); + switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { case IWL_TM_CMD_APP2DEV_UCODE: + IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n"); + result = iwl_testmode_ucode(hw, tb); + break; case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32: case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32: case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8: - case IWL_TM_CMD_APP2DEV_BEGIN_TRACE: - case IWL_TM_CMD_APP2DEV_END_TRACE: - case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ: - case IWL_TM_CMD_APP2DEV_NOTIFICATIONS: - case IWL_TM_CMD_APP2DEV_GET_FW_VERSION: - case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: - case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE: - result = iwl_test_handle_cmd(&priv->tst, tb); + IWL_DEBUG_INFO(priv, "testmode cmd to register\n"); + result = iwl_testmode_reg(hw, tb); break; - case IWL_TM_CMD_APP2DEV_GET_DEVICENAME: case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW: case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB: @@ -419,25 +1020,45 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) case IWL_TM_CMD_APP2DEV_GET_EEPROM: case IWL_TM_CMD_APP2DEV_FIXRATE_REQ: case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: + case IWL_TM_CMD_APP2DEV_GET_FW_VERSION: + case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: case IWL_TM_CMD_APP2DEV_GET_FW_INFO: IWL_DEBUG_INFO(priv, "testmode cmd to driver\n"); result = iwl_testmode_driver(hw, tb); break; + case IWL_TM_CMD_APP2DEV_BEGIN_TRACE: + case IWL_TM_CMD_APP2DEV_END_TRACE: + case IWL_TM_CMD_APP2DEV_READ_TRACE: + IWL_DEBUG_INFO(priv, "testmode uCode trace cmd to driver\n"); + result = iwl_testmode_trace(hw, tb); + break; + case IWL_TM_CMD_APP2DEV_OWNERSHIP: IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n"); result = iwl_testmode_ownership(hw, tb); break; + case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ: + case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE: + IWL_DEBUG_INFO(priv, "testmode indirect memory cmd " + "to driver\n"); + result = iwl_testmode_indirect_mem(hw, tb); + break; + + case IWL_TM_CMD_APP2DEV_NOTIFICATIONS: + IWL_DEBUG_INFO(priv, "testmode notifications cmd " + "to driver\n"); + result = iwl_testmode_notifications(hw, tb); + break; + default: IWL_ERR(priv, "Unknown testmode command\n"); result = -ENOSYS; break; } - mutex_unlock(&priv->mutex); - if (result) - IWL_ERR(priv, "Test cmd failed result=%d\n", result); + mutex_unlock(&priv->mutex); return result; } @@ -445,6 +1066,7 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb, struct netlink_callback *cb, void *data, int len) { + struct nlattr *tb[IWL_TM_ATTR_MAX]; struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); int result; u32 cmd; @@ -453,19 +1075,39 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb, /* offset by 1 since commands start at 0 */ cmd = cb->args[3] - 1; } else { - struct nlattr *tb[IWL_TM_ATTR_MAX]; - - result = iwl_test_parse(&priv->tst, tb, data, len); - if (result) + result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len, + 
iwl_testmode_gnl_msg_policy); + if (result) { + IWL_ERR(priv, + "Error parsing the gnl message : %d\n", result); return result; + } + /* IWL_TM_ATTR_COMMAND is absolutely mandatory */ + if (!tb[IWL_TM_ATTR_COMMAND]) { + IWL_ERR(priv, "Missing testmode command type\n"); + return -ENOMSG; + } cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]); cb->args[3] = cmd + 1; } /* in case multiple accesses to the device happens */ mutex_lock(&priv->mutex); - result = iwl_test_dump(&priv->tst, cmd, skb, cb); + switch (cmd) { + case IWL_TM_CMD_APP2DEV_READ_TRACE: + IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n"); + result = iwl_testmode_trace_dump(hw, skb, cb); + break; + case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP: + IWL_DEBUG_INFO(priv, "testmode sram dump cmd to driver\n"); + result = iwl_testmode_buffer_dump(hw, skb, cb); + break; + default: + result = -EINVAL; + break; + } + mutex_unlock(&priv->mutex); return result; } diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-testmode.h b/trunk/drivers/net/wireless/iwlwifi/dvm/testmode.h similarity index 100% rename from trunk/drivers/net/wireless/iwlwifi/iwl-testmode.h rename to trunk/drivers/net/wireless/iwlwifi/dvm/testmode.h diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/tx.c b/trunk/drivers/net/wireless/iwlwifi/dvm/tx.c index 5971a23aa47d..0dfaf649b257 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/tx.c @@ -403,7 +403,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) info->driver_data[0] = ctx; info->driver_data[1] = dev_cmd; - /* From now on, we cannot access info->control */ spin_lock(&priv->sta_lock); @@ -1183,8 +1182,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, } /*we can free until ssn % q.n_bd not inclusive */ - WARN_ON_ONCE(iwl_reclaim(priv, sta_id, tid, - txq_id, ssn, &skbs)); + WARN_ON(iwl_reclaim(priv, sta_id, tid, txq_id, ssn, &skbs)); iwlagn_check_ratid_empty(priv, sta_id, tid); freed = 0; diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-drv.c b/trunk/drivers/net/wireless/iwlwifi/iwl-drv.c index a175997e7829..49df0e9d5c5f 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -131,8 +131,6 @@ struct iwl_drv { #define DVM_OP_MODE 0 #define MVM_OP_MODE 1 -/* Protects the table contents, i.e. 
the ops pointer & drv list */ -static struct mutex iwlwifi_opmode_table_mtx; static struct iwlwifi_opmode_table { const char *name; /* name: iwldvm, iwlmvm, etc */ const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */ @@ -778,7 +776,6 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) const unsigned int api_min = drv->cfg->ucode_api_min; u32 api_ver; int i; - bool load_module = false; fw->ucode_capa.max_probe_length = 200; fw->ucode_capa.standard_phy_calibration_size = @@ -901,7 +898,6 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) /* We have our copies now, allow OS release its copies */ release_firmware(ucode_raw); - mutex_lock(&iwlwifi_opmode_table_mtx); op = &iwlwifi_opmode_table[DVM_OP_MODE]; /* add this device to the list of devices using this op_mode */ @@ -911,14 +907,11 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) const struct iwl_op_mode_ops *ops = op->ops; drv->op_mode = ops->start(drv->trans, drv->cfg, &drv->fw); - if (!drv->op_mode) { - mutex_unlock(&iwlwifi_opmode_table_mtx); + if (!drv->op_mode) goto out_unbind; - } } else { - load_module = true; + request_module_nowait("%s", op->name); } - mutex_unlock(&iwlwifi_opmode_table_mtx); /* * Complete the firmware request last so that @@ -926,14 +919,6 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) * are doing the start() above. */ complete(&drv->request_firmware_complete); - - /* - * Load the module last so we don't block anything - * else from proceeding if the module fails to load - * or hangs loading. - */ - if (load_module) - request_module("%s", op->name); return; try_again: @@ -967,7 +952,6 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans, drv->cfg = cfg; init_completion(&drv->request_firmware_complete); - INIT_LIST_HEAD(&drv->list); ret = iwl_request_firmware(drv, true); @@ -990,16 +974,6 @@ void iwl_drv_stop(struct iwl_drv *drv) iwl_dealloc_ucode(drv); - mutex_lock(&iwlwifi_opmode_table_mtx); - /* - * List is empty (this item wasn't added) - * when firmware loading failed -- in that - * case we can't remove it from any list. 
- */ - if (!list_empty(&drv->list)) - list_del(&drv->list); - mutex_unlock(&iwlwifi_opmode_table_mtx); - kfree(drv); } @@ -1022,7 +996,6 @@ int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops) int i; struct iwl_drv *drv; - mutex_lock(&iwlwifi_opmode_table_mtx); for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) { if (strcmp(iwlwifi_opmode_table[i].name, name)) continue; @@ -1030,10 +1003,8 @@ int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops) list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) drv->op_mode = ops->start(drv->trans, drv->cfg, &drv->fw); - mutex_unlock(&iwlwifi_opmode_table_mtx); return 0; } - mutex_unlock(&iwlwifi_opmode_table_mtx); return -EIO; } EXPORT_SYMBOL_GPL(iwl_opmode_register); @@ -1043,7 +1014,6 @@ void iwl_opmode_deregister(const char *name) int i; struct iwl_drv *drv; - mutex_lock(&iwlwifi_opmode_table_mtx); for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) { if (strcmp(iwlwifi_opmode_table[i].name, name)) continue; @@ -1056,10 +1026,8 @@ void iwl_opmode_deregister(const char *name) drv->op_mode = NULL; } } - mutex_unlock(&iwlwifi_opmode_table_mtx); return; } - mutex_unlock(&iwlwifi_opmode_table_mtx); } EXPORT_SYMBOL_GPL(iwl_opmode_deregister); @@ -1067,8 +1035,6 @@ static int __init iwl_drv_init(void) { int i; - mutex_init(&iwlwifi_opmode_table_mtx); - for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-fh.h b/trunk/drivers/net/wireless/iwlwifi/iwl-fh.h index 806046641747..74bce97a8600 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-fh.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-fh.h @@ -421,8 +421,6 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4) #define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98) -#define FH_TX_TRB_REG(_chan) (FH_MEM_LOWER_BOUND + 0x958 + (_chan) * 4) - /* Instruct FH to increment the retry count of a packet when * it is brought from the memory to TX-FIFO */ diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-io.c b/trunk/drivers/net/wireless/iwlwifi/iwl-io.c index 66c873399aba..5f2df70b73c1 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-io.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-io.c @@ -298,8 +298,8 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask) } EXPORT_SYMBOL_GPL(iwl_clear_bits_prph); -void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr, - void *buf, int dwords) +void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr, + void *buf, int words) { unsigned long flags; int offs; @@ -308,26 +308,26 @@ void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr, spin_lock_irqsave(&trans->reg_lock, flags); if (likely(iwl_grab_nic_access(trans))) { iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr); - for (offs = 0; offs < dwords; offs++) + for (offs = 0; offs < words; offs++) vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT); iwl_release_nic_access(trans); } spin_unlock_irqrestore(&trans->reg_lock, flags); } -EXPORT_SYMBOL_GPL(_iwl_read_targ_mem_dwords); +EXPORT_SYMBOL_GPL(_iwl_read_targ_mem_words); u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr) { u32 value; - _iwl_read_targ_mem_dwords(trans, addr, &value, 1); + _iwl_read_targ_mem_words(trans, addr, &value, 1); return value; } EXPORT_SYMBOL_GPL(iwl_read_targ_mem); -int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr, - void *buf, int dwords) +int 
_iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr, + void *buf, int words) { unsigned long flags; int offs, result = 0; @@ -336,7 +336,7 @@ int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr, spin_lock_irqsave(&trans->reg_lock, flags); if (likely(iwl_grab_nic_access(trans))) { iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); - for (offs = 0; offs < dwords; offs++) + for (offs = 0; offs < words; offs++) iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]); iwl_release_nic_access(trans); } else @@ -345,10 +345,10 @@ int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr, return result; } -EXPORT_SYMBOL_GPL(_iwl_write_targ_mem_dwords); +EXPORT_SYMBOL_GPL(_iwl_write_targ_mem_words); int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val) { - return _iwl_write_targ_mem_dwords(trans, addr, &val, 1); + return _iwl_write_targ_mem_words(trans, addr, &val, 1); } EXPORT_SYMBOL_GPL(iwl_write_targ_mem); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-io.h b/trunk/drivers/net/wireless/iwlwifi/iwl-io.h index 50d3819739d1..4a9a45f771ed 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-io.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-io.h @@ -76,18 +76,18 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg, u32 bits, u32 mask); void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask); -void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr, - void *buf, int dwords); +void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr, + void *buf, int words); -#define iwl_read_targ_mem_bytes(trans, addr, buf, bufsize) \ +#define iwl_read_targ_mem_words(trans, addr, buf, bufsize) \ do { \ BUILD_BUG_ON((bufsize) % sizeof(u32)); \ - _iwl_read_targ_mem_dwords(trans, addr, buf, \ - (bufsize) / sizeof(u32));\ + _iwl_read_targ_mem_words(trans, addr, buf, \ + (bufsize) / sizeof(u32));\ } while (0) -int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr, - void *buf, int dwords); +int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr, + void *buf, int words); u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr); int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-test.c b/trunk/drivers/net/wireless/iwlwifi/iwl-test.c deleted file mode 100644 index 81e8c7126d72..000000000000 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-test.c +++ /dev/null @@ -1,856 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. 
- * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#include -#include - -#include "iwl-io.h" -#include "iwl-fh.h" -#include "iwl-prph.h" -#include "iwl-trans.h" -#include "iwl-test.h" -#include "iwl-csr.h" -#include "iwl-testmode.h" - -/* - * Periphery registers absolute lower bound. This is used in order to - * differentiate registery access through HBUS_TARG_PRPH_* and - * HBUS_TARG_MEM_* accesses. - */ -#define IWL_ABS_PRPH_START (0xA00000) - -/* - * The TLVs used in the gnl message policy between the kernel module and - * user space application. iwl_testmode_gnl_msg_policy is to be carried - * through the NL80211_CMD_TESTMODE channel regulated by nl80211. 
- * See iwl-testmode.h - */ -static -struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = { - [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, }, - - [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, }, - [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, }, - - [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, }, - [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, }, - [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, }, - - [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, }, - [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, }, - - [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, }, - - [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, }, - [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, }, - [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, }, - - [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, }, - - [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, }, - - [IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, }, - [IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, }, - [IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, }, - - [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, }, - [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, }, - [IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, }, - [IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, }, - [IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, }, - - [IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, }, -}; - -static inline void iwl_test_trace_clear(struct iwl_test *tst) -{ - memset(&tst->trace, 0, sizeof(struct iwl_test_trace)); -} - -static void iwl_test_trace_stop(struct iwl_test *tst) -{ - if (!tst->trace.enabled) - return; - - if (tst->trace.cpu_addr && tst->trace.dma_addr) - dma_free_coherent(tst->trans->dev, - tst->trace.tsize, - tst->trace.cpu_addr, - tst->trace.dma_addr); - - iwl_test_trace_clear(tst); -} - -static inline void iwl_test_mem_clear(struct iwl_test *tst) -{ - memset(&tst->mem, 0, sizeof(struct iwl_test_mem)); -} - -static inline void iwl_test_mem_stop(struct iwl_test *tst) -{ - if (!tst->mem.in_read) - return; - - iwl_test_mem_clear(tst); -} - -/* - * Initializes the test object - * During the lifetime of the test object it is assumed that the transport is - * started. The test object should be stopped before the transport is stopped. 
- */ -void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans, - struct iwl_test_ops *ops) -{ - tst->trans = trans; - tst->ops = ops; - - iwl_test_trace_clear(tst); - iwl_test_mem_clear(tst); -} -EXPORT_SYMBOL_GPL(iwl_test_init); - -/* - * Stop the test object - */ -void iwl_test_free(struct iwl_test *tst) -{ - iwl_test_mem_stop(tst); - iwl_test_trace_stop(tst); -} -EXPORT_SYMBOL_GPL(iwl_test_free); - -static inline int iwl_test_send_cmd(struct iwl_test *tst, - struct iwl_host_cmd *cmd) -{ - return tst->ops->send_cmd(tst->trans->op_mode, cmd); -} - -static inline bool iwl_test_valid_hw_addr(struct iwl_test *tst, u32 addr) -{ - return tst->ops->valid_hw_addr(addr); -} - -static inline u32 iwl_test_fw_ver(struct iwl_test *tst) -{ - return tst->ops->get_fw_ver(tst->trans->op_mode); -} - -static inline struct sk_buff* -iwl_test_alloc_reply(struct iwl_test *tst, int len) -{ - return tst->ops->alloc_reply(tst->trans->op_mode, len); -} - -static inline int iwl_test_reply(struct iwl_test *tst, struct sk_buff *skb) -{ - return tst->ops->reply(tst->trans->op_mode, skb); -} - -static inline struct sk_buff* -iwl_test_alloc_event(struct iwl_test *tst, int len) -{ - return tst->ops->alloc_event(tst->trans->op_mode, len); -} - -static inline void -iwl_test_event(struct iwl_test *tst, struct sk_buff *skb) -{ - return tst->ops->event(tst->trans->op_mode, skb); -} - -/* - * This function handles the user application commands to the fw. The fw - * commands are sent in a synchronuous manner. In case that the user requested - * to get commands response, it is send to the user. - */ -static int iwl_test_fw_cmd(struct iwl_test *tst, struct nlattr **tb) -{ - struct iwl_host_cmd cmd; - struct iwl_rx_packet *pkt; - struct sk_buff *skb; - void *reply_buf; - u32 reply_len; - int ret; - bool cmd_want_skb; - - memset(&cmd, 0, sizeof(struct iwl_host_cmd)); - - if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] || - !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) { - IWL_ERR(tst->trans, "Missing fw command mandatory fields\n"); - return -ENOMSG; - } - - cmd.flags = CMD_ON_DEMAND | CMD_SYNC; - cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]); - if (cmd_want_skb) - cmd.flags |= CMD_WANT_SKB; - - cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]); - cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); - cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); - cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; - IWL_DEBUG_INFO(tst->trans, "test fw cmd=0x%x, flags 0x%x, len %d\n", - cmd.id, cmd.flags, cmd.len[0]); - - ret = iwl_test_send_cmd(tst, &cmd); - if (ret) { - IWL_ERR(tst->trans, "Failed to send hcmd\n"); - return ret; - } - if (!cmd_want_skb) - return ret; - - /* Handling return of SKB to the user */ - pkt = cmd.resp_pkt; - if (!pkt) { - IWL_ERR(tst->trans, "HCMD received a null response packet\n"); - return ret; - } - - reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; - skb = iwl_test_alloc_reply(tst, reply_len + 20); - reply_buf = kmalloc(reply_len, GFP_KERNEL); - if (!skb || !reply_buf) { - kfree_skb(skb); - kfree(reply_buf); - return -ENOMEM; - } - - /* The reply is in a page, that we cannot send to user space. 
*/ - memcpy(reply_buf, &(pkt->hdr), reply_len); - iwl_free_resp(&cmd); - - if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, - IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) || - nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf)) - goto nla_put_failure; - return iwl_test_reply(tst, skb); - -nla_put_failure: - IWL_DEBUG_INFO(tst->trans, "Failed creating NL attributes\n"); - kfree(reply_buf); - kfree_skb(skb); - return -ENOMSG; -} - -/* - * Handles the user application commands for register access. - */ -static int iwl_test_reg(struct iwl_test *tst, struct nlattr **tb) -{ - u32 ofs, val32, cmd; - u8 val8; - struct sk_buff *skb; - int status = 0; - struct iwl_trans *trans = tst->trans; - - if (!tb[IWL_TM_ATTR_REG_OFFSET]) { - IWL_ERR(trans, "Missing reg offset\n"); - return -ENOMSG; - } - - ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]); - IWL_DEBUG_INFO(trans, "test reg access cmd offset=0x%x\n", ofs); - - cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]); - - /* - * Allow access only to FH/CSR/HBUS in direct mode. - * Since we don't have the upper bounds for the CSR and HBUS segments, - * we will use only the upper bound of FH for sanity check. - */ - if (ofs >= FH_MEM_UPPER_BOUND) { - IWL_ERR(trans, "offset out of segment (0x0 - 0x%x)\n", - FH_MEM_UPPER_BOUND); - return -EINVAL; - } - - switch (cmd) { - case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32: - val32 = iwl_read_direct32(tst->trans, ofs); - IWL_DEBUG_INFO(trans, "32 value to read 0x%x\n", val32); - - skb = iwl_test_alloc_reply(tst, 20); - if (!skb) { - IWL_ERR(trans, "Memory allocation fail\n"); - return -ENOMEM; - } - if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32)) - goto nla_put_failure; - status = iwl_test_reply(tst, skb); - if (status < 0) - IWL_ERR(trans, "Error sending msg : %d\n", status); - break; - - case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32: - if (!tb[IWL_TM_ATTR_REG_VALUE32]) { - IWL_ERR(trans, "Missing value to write\n"); - return -ENOMSG; - } else { - val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]); - IWL_DEBUG_INFO(trans, "32b write val=0x%x\n", val32); - iwl_write_direct32(tst->trans, ofs, val32); - } - break; - - case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8: - if (!tb[IWL_TM_ATTR_REG_VALUE8]) { - IWL_ERR(trans, "Missing value to write\n"); - return -ENOMSG; - } else { - val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]); - IWL_DEBUG_INFO(trans, "8b write val=0x%x\n", val8); - iwl_write8(tst->trans, ofs, val8); - } - break; - - default: - IWL_ERR(trans, "Unknown test register cmd ID\n"); - return -ENOMSG; - } - - return status; - -nla_put_failure: - kfree_skb(skb); - return -EMSGSIZE; -} - -/* - * Handles the request to start FW tracing. Allocates of the trace buffer - * and sends a reply to user space with the address of the allocated buffer. 
- */ -static int iwl_test_trace_begin(struct iwl_test *tst, struct nlattr **tb) -{ - struct sk_buff *skb; - int status = 0; - - if (tst->trace.enabled) - return -EBUSY; - - if (!tb[IWL_TM_ATTR_TRACE_SIZE]) - tst->trace.size = TRACE_BUFF_SIZE_DEF; - else - tst->trace.size = - nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]); - - if (!tst->trace.size) - return -EINVAL; - - if (tst->trace.size < TRACE_BUFF_SIZE_MIN || - tst->trace.size > TRACE_BUFF_SIZE_MAX) - return -EINVAL; - - tst->trace.tsize = tst->trace.size + TRACE_BUFF_PADD; - tst->trace.cpu_addr = dma_alloc_coherent(tst->trans->dev, - tst->trace.tsize, - &tst->trace.dma_addr, - GFP_KERNEL); - if (!tst->trace.cpu_addr) - return -ENOMEM; - - tst->trace.enabled = true; - tst->trace.trace_addr = (u8 *)PTR_ALIGN(tst->trace.cpu_addr, 0x100); - - memset(tst->trace.trace_addr, 0x03B, tst->trace.size); - - skb = iwl_test_alloc_reply(tst, sizeof(tst->trace.dma_addr) + 20); - if (!skb) { - IWL_ERR(tst->trans, "Memory allocation fail\n"); - iwl_test_trace_stop(tst); - return -ENOMEM; - } - - if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR, - sizeof(tst->trace.dma_addr), - (u64 *)&tst->trace.dma_addr)) - goto nla_put_failure; - - status = iwl_test_reply(tst, skb); - if (status < 0) - IWL_ERR(tst->trans, "Error sending msg : %d\n", status); - - tst->trace.nchunks = DIV_ROUND_UP(tst->trace.size, - DUMP_CHUNK_SIZE); - - return status; - -nla_put_failure: - kfree_skb(skb); - if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) == - IWL_TM_CMD_APP2DEV_BEGIN_TRACE) - iwl_test_trace_stop(tst); - return -EMSGSIZE; -} - -/* - * Handles indirect read from the periphery or the SRAM. The read is performed - * to a temporary buffer. The user space application should later issue a dump - */ -static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size) -{ - struct iwl_trans *trans = tst->trans; - unsigned long flags; - int i; - - if (size & 0x3) - return -EINVAL; - - tst->mem.size = size; - tst->mem.addr = kmalloc(tst->mem.size, GFP_KERNEL); - if (tst->mem.addr == NULL) - return -ENOMEM; - - /* Hard-coded periphery absolute address */ - if (IWL_ABS_PRPH_START <= addr && - addr < IWL_ABS_PRPH_START + PRPH_END) { - spin_lock_irqsave(&trans->reg_lock, flags); - iwl_grab_nic_access(trans); - iwl_write32(trans, HBUS_TARG_PRPH_RADDR, - addr | (3 << 24)); - for (i = 0; i < size; i += 4) - *(u32 *)(tst->mem.addr + i) = - iwl_read32(trans, HBUS_TARG_PRPH_RDAT); - iwl_release_nic_access(trans); - spin_unlock_irqrestore(&trans->reg_lock, flags); - } else { /* target memory (SRAM) */ - _iwl_read_targ_mem_dwords(trans, addr, - tst->mem.addr, - tst->mem.size / 4); - } - - tst->mem.nchunks = - DIV_ROUND_UP(tst->mem.size, DUMP_CHUNK_SIZE); - tst->mem.in_read = true; - return 0; - -} - -/* - * Handles indirect write to the periphery or SRAM. The is performed to a - * temporary buffer. 
- */ -static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr, - u32 size, unsigned char *buf) -{ - struct iwl_trans *trans = tst->trans; - u32 val, i; - unsigned long flags; - - if (IWL_ABS_PRPH_START <= addr && - addr < IWL_ABS_PRPH_START + PRPH_END) { - /* Periphery writes can be 1-3 bytes long, or DWORDs */ - if (size < 4) { - memcpy(&val, buf, size); - spin_lock_irqsave(&trans->reg_lock, flags); - iwl_grab_nic_access(trans); - iwl_write32(trans, HBUS_TARG_PRPH_WADDR, - (addr & 0x0000FFFF) | - ((size - 1) << 24)); - iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val); - iwl_release_nic_access(trans); - /* needed after consecutive writes w/o read */ - mmiowb(); - spin_unlock_irqrestore(&trans->reg_lock, flags); - } else { - if (size % 4) - return -EINVAL; - for (i = 0; i < size; i += 4) - iwl_write_prph(trans, addr+i, - *(u32 *)(buf+i)); - } - } else if (iwl_test_valid_hw_addr(tst, addr)) { - _iwl_write_targ_mem_dwords(trans, addr, buf, size / 4); - } else { - return -EINVAL; - } - return 0; -} - -/* - * Handles the user application commands for indirect read/write - * to/from the periphery or the SRAM. - */ -static int iwl_test_indirect_mem(struct iwl_test *tst, struct nlattr **tb) -{ - u32 addr, size, cmd; - unsigned char *buf; - - /* Both read and write should be blocked, for atomicity */ - if (tst->mem.in_read) - return -EBUSY; - - cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]); - if (!tb[IWL_TM_ATTR_MEM_ADDR]) { - IWL_ERR(tst->trans, "Error finding memory offset address\n"); - return -ENOMSG; - } - addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]); - if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) { - IWL_ERR(tst->trans, "Error finding size for memory reading\n"); - return -ENOMSG; - } - size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]); - - if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ) { - return iwl_test_indirect_read(tst, addr, size); - } else { - if (!tb[IWL_TM_ATTR_BUFFER_DUMP]) - return -EINVAL; - buf = (unsigned char *)nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]); - return iwl_test_indirect_write(tst, addr, size, buf); - } -} - -/* - * Enable notifications to user space - */ -static int iwl_test_notifications(struct iwl_test *tst, - struct nlattr **tb) -{ - tst->notify = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]); - return 0; -} - -/* - * Handles the request to get the device id - */ -static int iwl_test_get_dev_id(struct iwl_test *tst, struct nlattr **tb) -{ - u32 devid = tst->trans->hw_id; - struct sk_buff *skb; - int status; - - IWL_DEBUG_INFO(tst->trans, "hw version: 0x%x\n", devid); - - skb = iwl_test_alloc_reply(tst, 20); - if (!skb) { - IWL_ERR(tst->trans, "Memory allocation fail\n"); - return -ENOMEM; - } - - if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid)) - goto nla_put_failure; - status = iwl_test_reply(tst, skb); - if (status < 0) - IWL_ERR(tst->trans, "Error sending msg : %d\n", status); - - return 0; - -nla_put_failure: - kfree_skb(skb); - return -EMSGSIZE; -} - -/* - * Handles the request to get the FW version - */ -static int iwl_test_get_fw_ver(struct iwl_test *tst, struct nlattr **tb) -{ - struct sk_buff *skb; - int status; - u32 ver = iwl_test_fw_ver(tst); - - IWL_DEBUG_INFO(tst->trans, "uCode version raw: 0x%x\n", ver); - - skb = iwl_test_alloc_reply(tst, 20); - if (!skb) { - IWL_ERR(tst->trans, "Memory allocation fail\n"); - return -ENOMEM; - } - - if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, ver)) - goto nla_put_failure; - - status = iwl_test_reply(tst, skb); - if (status < 0) - IWL_ERR(tst->trans, "Error sending msg : %d\n", status); - - return 0; - 
-nla_put_failure: - kfree_skb(skb); - return -EMSGSIZE; -} - -/* - * Parse the netlink message and validate that the IWL_TM_ATTR_CMD exists - */ -int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb, - void *data, int len) -{ - int result; - - result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len, - iwl_testmode_gnl_msg_policy); - if (result) { - IWL_ERR(tst->trans, "Fail parse gnl msg: %d\n", result); - return result; - } - - /* IWL_TM_ATTR_COMMAND is absolutely mandatory */ - if (!tb[IWL_TM_ATTR_COMMAND]) { - IWL_ERR(tst->trans, "Missing testmode command type\n"); - return -ENOMSG; - } - return 0; -} -EXPORT_SYMBOL_GPL(iwl_test_parse); - -/* - * Handle test commands. - * Returns 1 for unknown commands (not handled by the test object); negative - * value in case of error. - */ -int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb) -{ - int result; - - switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { - case IWL_TM_CMD_APP2DEV_UCODE: - IWL_DEBUG_INFO(tst->trans, "test cmd to uCode\n"); - result = iwl_test_fw_cmd(tst, tb); - break; - - case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32: - case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32: - case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8: - IWL_DEBUG_INFO(tst->trans, "test cmd to register\n"); - result = iwl_test_reg(tst, tb); - break; - - case IWL_TM_CMD_APP2DEV_BEGIN_TRACE: - IWL_DEBUG_INFO(tst->trans, "test uCode trace cmd to driver\n"); - result = iwl_test_trace_begin(tst, tb); - break; - - case IWL_TM_CMD_APP2DEV_END_TRACE: - iwl_test_trace_stop(tst); - result = 0; - break; - - case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ: - case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE: - IWL_DEBUG_INFO(tst->trans, "test indirect memory cmd\n"); - result = iwl_test_indirect_mem(tst, tb); - break; - - case IWL_TM_CMD_APP2DEV_NOTIFICATIONS: - IWL_DEBUG_INFO(tst->trans, "test notifications cmd\n"); - result = iwl_test_notifications(tst, tb); - break; - - case IWL_TM_CMD_APP2DEV_GET_FW_VERSION: - IWL_DEBUG_INFO(tst->trans, "test get FW ver cmd\n"); - result = iwl_test_get_fw_ver(tst, tb); - break; - - case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: - IWL_DEBUG_INFO(tst->trans, "test Get device ID cmd\n"); - result = iwl_test_get_dev_id(tst, tb); - break; - - default: - IWL_DEBUG_INFO(tst->trans, "Unknown test command\n"); - result = 1; - break; - } - return result; -} -EXPORT_SYMBOL_GPL(iwl_test_handle_cmd); - -static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb, - struct netlink_callback *cb) -{ - int idx, length; - - if (!tst->trace.enabled || !tst->trace.trace_addr) - return -EFAULT; - - idx = cb->args[4]; - if (idx >= tst->trace.nchunks) - return -ENOENT; - - length = DUMP_CHUNK_SIZE; - if (((idx + 1) == tst->trace.nchunks) && - (tst->trace.size % DUMP_CHUNK_SIZE)) - length = tst->trace.size % - DUMP_CHUNK_SIZE; - - if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length, - tst->trace.trace_addr + (DUMP_CHUNK_SIZE * idx))) - goto nla_put_failure; - - cb->args[4] = ++idx; - return 0; - - nla_put_failure: - return -ENOBUFS; -} - -static int iwl_test_buffer_dump(struct iwl_test *tst, struct sk_buff *skb, - struct netlink_callback *cb) -{ - int idx, length; - - if (!tst->mem.in_read) - return -EFAULT; - - idx = cb->args[4]; - if (idx >= tst->mem.nchunks) { - iwl_test_mem_stop(tst); - return -ENOENT; - } - - length = DUMP_CHUNK_SIZE; - if (((idx + 1) == tst->mem.nchunks) && - (tst->mem.size % DUMP_CHUNK_SIZE)) - length = tst->mem.size % DUMP_CHUNK_SIZE; - - if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length, - tst->mem.addr + (DUMP_CHUNK_SIZE * idx))) - goto 
nla_put_failure; - - cb->args[4] = ++idx; - return 0; - - nla_put_failure: - return -ENOBUFS; -} - -/* - * Handle dump commands. - * Returns 1 for unknown commands (not handled by the test object); negative - * value in case of error. - */ -int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb, - struct netlink_callback *cb) -{ - int result; - - switch (cmd) { - case IWL_TM_CMD_APP2DEV_READ_TRACE: - IWL_DEBUG_INFO(tst->trans, "uCode trace cmd\n"); - result = iwl_test_trace_dump(tst, skb, cb); - break; - - case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP: - IWL_DEBUG_INFO(tst->trans, "testmode sram dump cmd\n"); - result = iwl_test_buffer_dump(tst, skb, cb); - break; - - default: - result = 1; - break; - } - return result; -} -EXPORT_SYMBOL_GPL(iwl_test_dump); - -/* - * Multicast a spontaneous messages from the device to the user space. - */ -static void iwl_test_send_rx(struct iwl_test *tst, - struct iwl_rx_cmd_buffer *rxb) -{ - struct sk_buff *skb; - struct iwl_rx_packet *data; - int length; - - data = rxb_addr(rxb); - length = le32_to_cpu(data->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; - - /* the length doesn't include len_n_flags field, so add it manually */ - length += sizeof(__le32); - - skb = iwl_test_alloc_event(tst, length + 20); - if (skb == NULL) { - IWL_ERR(tst->trans, "Out of memory for message to user\n"); - return; - } - - if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, - IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) || - nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data)) - goto nla_put_failure; - - iwl_test_event(tst, skb); - return; - -nla_put_failure: - kfree_skb(skb); - IWL_ERR(tst->trans, "Ouch, overran buffer, check allocation!\n"); -} - -/* - * Called whenever a Rx frames is recevied from the device. If notifications to - * the user space are requested, sends the frames to the user. - */ -void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb) -{ - if (tst->notify) - iwl_test_send_rx(tst, rxb); -} -EXPORT_SYMBOL_GPL(iwl_test_rx); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-test.h b/trunk/drivers/net/wireless/iwlwifi/iwl-test.h deleted file mode 100644 index e13ffa8acc02..000000000000 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-test.h +++ /dev/null @@ -1,161 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2010 - 2012 Intel Corporation. 
All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#ifndef __IWL_TEST_H__ -#define __IWL_TEST_H__ - -#include -#include "iwl-trans.h" - -struct iwl_test_trace { - u32 size; - u32 tsize; - u32 nchunks; - u8 *cpu_addr; - u8 *trace_addr; - dma_addr_t dma_addr; - bool enabled; -}; - -struct iwl_test_mem { - u32 size; - u32 nchunks; - u8 *addr; - bool in_read; -}; - -/* - * struct iwl_test_ops: callback to the op mode - * - * The structure defines the callbacks that the op_mode should handle, - * inorder to handle logic that is out of the scope of iwl_test. The - * op_mode must set all the callbacks. - - * @send_cmd: handler that is used by the test object to request the - * op_mode to send a command to the fw. - * - * @valid_hw_addr: handler that is used by the test object to request the - * op_mode to check if the given address is a valid address. - * - * @get_fw_ver: handler used to get the FW version. - * - * @alloc_reply: handler used by the test object to request the op_mode - * to allocate an skb for sending a reply to the user, and initialize - * the skb. It is assumed that the test object only fills the required - * attributes. - * - * @reply: handler used by the test object to request the op_mode to reply - * to a request. The skb is an skb previously allocated by the the - * alloc_reply callback. - I - * @alloc_event: handler used by the test object to request the op_mode - * to allocate an skb for sending an event, and initialize - * the skb. It is assumed that the test object only fills the required - * attributes. - * - * @reply: handler used by the test object to request the op_mode to send - * an event. The skb is an skb previously allocated by the the - * alloc_event callback. 
- */ -struct iwl_test_ops { - int (*send_cmd)(struct iwl_op_mode *op_modes, - struct iwl_host_cmd *cmd); - bool (*valid_hw_addr)(u32 addr); - u32 (*get_fw_ver)(struct iwl_op_mode *op_mode); - - struct sk_buff *(*alloc_reply)(struct iwl_op_mode *op_mode, int len); - int (*reply)(struct iwl_op_mode *op_mode, struct sk_buff *skb); - struct sk_buff* (*alloc_event)(struct iwl_op_mode *op_mode, int len); - void (*event)(struct iwl_op_mode *op_mode, struct sk_buff *skb); -}; - -struct iwl_test { - struct iwl_trans *trans; - struct iwl_test_ops *ops; - struct iwl_test_trace trace; - struct iwl_test_mem mem; - bool notify; -}; - -void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans, - struct iwl_test_ops *ops); - -void iwl_test_free(struct iwl_test *tst); - -int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb, - void *data, int len); - -int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb); - -int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb, - struct netlink_callback *cb); - -void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb); - -static inline void iwl_test_enable_notifications(struct iwl_test *tst, - bool enable) -{ - tst->notify = enable; -} - -#endif diff --git a/trunk/drivers/net/wireless/iwlwifi/pcie/6000.c b/trunk/drivers/net/wireless/iwlwifi/pcie/6000.c index 4a57624afc40..cb08ba03aae7 100644 --- a/trunk/drivers/net/wireless/iwlwifi/pcie/6000.c +++ b/trunk/drivers/net/wireless/iwlwifi/pcie/6000.c @@ -258,7 +258,6 @@ const struct iwl_cfg iwl6030_2bg_cfg = { .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ .base_params = &iwl6000_g2_base_params, \ .bt_params = &iwl6000_bt_params, \ - .eeprom_params = &iwl6000_eeprom_params, \ .need_temp_offset_calib = true, \ .led_mode = IWL_LED_RF_STATE, \ .adv_pm = true diff --git a/trunk/drivers/net/wireless/iwlwifi/pcie/internal.h b/trunk/drivers/net/wireless/iwlwifi/pcie/internal.h index 5024fb662bf6..94201c4d6227 100644 --- a/trunk/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/trunk/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -339,9 +339,16 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, struct iwl_tx_queue *txq, u16 byte_cnt); +void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue); +void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index); +void iwl_trans_tx_queue_set_status(struct iwl_trans *trans, + struct iwl_tx_queue *txq, + int tx_fifo_id, bool active); +void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, + int fifo, int sta_id, int tid, + int frame_limit, u16 ssn); void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, int sta_id, int tid, int frame_limit, u16 ssn); -void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue); void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, enum dma_data_direction dma_dir); int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, diff --git a/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c b/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c index 32ab8ea56135..7461a6a14338 100644 --- a/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -298,10 +298,6 @@ static void iwl_trans_pcie_queue_stuck_timer(unsigned long data) struct iwl_tx_queue *txq = (void *)data; struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); - u32 scd_sram_addr = 
trans_pcie->scd_base_addr + - SCD_TX_STTS_MEM_LOWER_BOUND + (16 * txq->q.id); - u8 buf[16]; - int i; spin_lock(&txq->lock); /* check if triggered erroneously */ @@ -311,40 +307,15 @@ static void iwl_trans_pcie_queue_stuck_timer(unsigned long data) } spin_unlock(&txq->lock); + IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id, jiffies_to_msecs(trans_pcie->wd_timeout)); IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", txq->q.read_ptr, txq->q.write_ptr); - - iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); - - iwl_print_hex_error(trans, buf, sizeof(buf)); - - for (i = 0; i < FH_TCSR_CHNL_NUM; i++) - IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i, - iwl_read_direct32(trans, FH_TX_TRB_REG(i))); - - for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { - u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i)); - u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; - bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); - u32 tbl_dw = - iwl_read_targ_mem(trans, - trans_pcie->scd_base_addr + - SCD_TRANS_TBL_OFFSET_QUEUE(i)); - - if (i & 0x1) - tbl_dw = (tbl_dw & 0xFFFF0000) >> 16; - else - tbl_dw = tbl_dw & 0x0000FFFF; - - IWL_ERR(trans, - "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", - i, active ? "" : "in", fifo, tbl_dw, - iwl_read_prph(trans, - SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1), - iwl_read_prph(trans, SCD_QUEUE_WRPTR(i))); - } + IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n", + iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq->q.id)) + & (TFD_QUEUE_SIZE_MAX - 1), + iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq->q.id))); iwl_op_mode_nic_error(trans->op_mode); } @@ -1083,21 +1054,23 @@ static void iwl_tx_start(struct iwl_trans *trans) iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, trans_pcie->scd_bc_tbls.dma >> 10); - /* The chain extension of the SCD doesn't work well. This feature is - * enabled by default by the HW, so we need to disable it manually. - */ - iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); - for (i = 0; i < trans_pcie->n_q_to_fifo; i++) { int fifo = trans_pcie->setup_q_to_fifo[i]; - iwl_trans_pcie_txq_enable(trans, i, fifo, IWL_INVALID_STATION, - IWL_TID_NON_QOS, SCD_FRAME_LIMIT, 0); + __iwl_trans_pcie_txq_enable(trans, i, fifo, IWL_INVALID_STATION, + IWL_TID_NON_QOS, + SCD_FRAME_LIMIT, 0); } /* Activate all Tx DMA/FIFO channels */ iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7)); + /* The chain extension of the SCD doesn't work well. This feature is + * enabled by default by the HW, so we need to disable it manually. + */ + iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); + + /* Enable DMA channel */ for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), @@ -1266,19 +1239,6 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, spin_lock(&txq->lock); - /* In AGG mode, the index in the ring must correspond to the WiFi - * sequence number. This is a HW requirements to help the SCD to parse - * the BA. - * Check here that the packets are in the right place on the ring. 
- */ -#ifdef CONFIG_IWLWIFI_DEBUG - wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); - WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) && - ((wifi_seq & 0xff) != q->write_ptr), - "Q: %d WiFi Seq %d tfdNum %d", - txq_id, wifi_seq, q->write_ptr); -#endif - /* Set up driver data for this TFD */ txq->entries[q->write_ptr].skb = skb; txq->entries[q->write_ptr].cmd = dev_cmd; @@ -1372,8 +1332,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, skb->data + hdr_len, secondlen); /* start timer if queue currently empty */ - if (txq->need_update && q->read_ptr == q->write_ptr && - trans_pcie->wd_timeout) + if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout) mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); /* Tell device the write index *just past* this latest filled TFD */ diff --git a/trunk/drivers/net/wireless/iwlwifi/pcie/tx.c b/trunk/drivers/net/wireless/iwlwifi/pcie/tx.c index 6baf8deef519..35e82161ca43 100644 --- a/trunk/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/trunk/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -380,8 +380,8 @@ static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; } -static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, - u16 txq_id) +static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid, + u16 txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 tbl_dw_addr; @@ -405,7 +405,7 @@ static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, return 0; } -static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id) +static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id) { /* Simply stop the queue, but don't change any configuration; * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. 
*/ @@ -415,16 +415,46 @@ static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id) (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); } -void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, - int sta_id, int tid, int frame_limit, u16 ssn) +void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index) +{ + IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d\n", txq_id, index & 0xff); + iwl_write_direct32(trans, HBUS_TARG_WRPTR, + (index & 0xff) | (txq_id << 8)); + iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index); +} + +void iwl_trans_tx_queue_set_status(struct iwl_trans *trans, + struct iwl_tx_queue *txq, + int tx_fifo_id, bool active) +{ + int txq_id = txq->q.id; + + iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), + (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) | + (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) | + (1 << SCD_QUEUE_STTS_REG_POS_WSL) | + SCD_QUEUE_STTS_REG_MSK); + + if (active) + IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d\n", + txq_id, tx_fifo_id); + else + IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); +} + +void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, + int fifo, int sta_id, int tid, + int frame_limit, u16 ssn) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + lockdep_assert_held(&trans_pcie->irq_lock); + if (test_and_set_bit(txq_id, trans_pcie->queue_used)) WARN_ONCE(1, "queue %d already used - expect issues", txq_id); /* Stop this Tx queue before configuring it */ - iwl_txq_set_inactive(trans, txq_id); + iwlagn_tx_queue_stop_scheduler(trans, txq_id); /* Set this queue as a chain-building queue unless it is CMD queue */ if (txq_id != trans_pcie->cmd_queue) @@ -435,27 +465,17 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, u16 ra_tid = BUILD_RAxTID(sta_id, tid); /* Map receiver-address / traffic-ID to this queue */ - iwl_txq_set_ratid_map(trans, ra_tid, txq_id); + iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id); /* enable aggregations for the queue */ iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); - } else { - /* - * disable aggregations for the queue, this will also make the - * ra_tid mapping configuration irrelevant since it is now a - * non-AGG queue. - */ - iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); } /* Place first TFD at index corresponding to start sequence number. 
* Assumes that ssn_idx is valid (!= 0xFFF) */ trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff); trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff); - - iwl_write_direct32(trans, HBUS_TARG_WRPTR, - (ssn & 0xff) | (txq_id << 8)); - iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); + iwl_trans_set_wr_ptrs(trans, txq_id, ssn); /* Set up Tx window size and frame limit for this queue */ iwl_write_targ_mem(trans, trans_pcie->scd_base_addr + @@ -468,34 +488,43 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ - iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), - (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | - (fifo << SCD_QUEUE_STTS_REG_POS_TXF) | - (1 << SCD_QUEUE_STTS_REG_POS_WSL) | - SCD_QUEUE_STTS_REG_MSK); - IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n", - txq_id, fifo, ssn & 0xff); + iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], + fifo, true); +} + +void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, + int sta_id, int tid, int frame_limit, u16 ssn) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + unsigned long flags; + + spin_lock_irqsave(&trans_pcie->irq_lock, flags); + + __iwl_trans_pcie_txq_enable(trans, txq_id, fifo, sta_id, + tid, frame_limit, ssn); + + spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); } void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u16 rd_ptr, wr_ptr; - int n_bd = trans_pcie->txq[txq_id].q.n_bd; if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { WARN_ONCE(1, "queue %d not used", txq_id); return; } - rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1); - wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)); + iwlagn_tx_queue_stop_scheduler(trans, txq_id); + + iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); - WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]", - txq_id, rd_ptr, wr_ptr); + trans_pcie->txq[txq_id].q.read_ptr = 0; + trans_pcie->txq[txq_id].q.write_ptr = 0; + iwl_trans_set_wr_ptrs(trans, txq_id, 0); - iwl_txq_set_inactive(trans, txq_id); - IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); + iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], + 0, false); } /*************** HOST COMMAND QUEUE FUNCTIONS *****/ diff --git a/trunk/drivers/net/wireless/mwifiex/uap_cmd.c b/trunk/drivers/net/wireless/mwifiex/uap_cmd.c index 89f9a2a45de3..8173ab66066d 100644 --- a/trunk/drivers/net/wireless/mwifiex/uap_cmd.c +++ b/trunk/drivers/net/wireless/mwifiex/uap_cmd.c @@ -27,17 +27,6 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv, struct cfg80211_ap_settings *params) { int i; - if (!params->privacy) { - bss_config->protocol = PROTOCOL_NO_SECURITY; - bss_config->key_mgmt = KEY_MGMT_NONE; - bss_config->wpa_cfg.length = 0; - priv->sec_info.wep_enabled = 0; - priv->sec_info.wpa_enabled = 0; - priv->sec_info.wpa2_enabled = 0; - - return 0; - } - switch (params->auth_type) { case NL80211_AUTHTYPE_OPEN_SYSTEM: bss_config->auth_mode = WLAN_AUTH_OPEN; diff --git a/trunk/drivers/net/wireless/rndis_wlan.c b/trunk/drivers/net/wireless/rndis_wlan.c index dfcd02ab6cae..2e9e6af21362 100644 --- a/trunk/drivers/net/wireless/rndis_wlan.c +++ b/trunk/drivers/net/wireless/rndis_wlan.c @@ -2110,7 +2110,7 @@ static int rndis_check_bssid_list(struct usbnet *usbdev, u8 *match_bssid, while 
(check_bssid_list_item(bssid, bssid_len, buf, len)) { if (rndis_bss_info_update(usbdev, bssid) && match_bssid && matched) { - if (ether_addr_equal(bssid->mac, match_bssid)) + if (!ether_addr_equal(bssid->mac, match_bssid)) *matched = true; } diff --git a/trunk/include/linux/can.h b/trunk/include/linux/can.h index 1a66cf6112ae..17334c09bd93 100644 --- a/trunk/include/linux/can.h +++ b/trunk/include/linux/can.h @@ -46,67 +46,18 @@ typedef __u32 canid_t; */ typedef __u32 can_err_mask_t; -/* CAN payload length and DLC definitions according to ISO 11898-1 */ -#define CAN_MAX_DLC 8 -#define CAN_MAX_DLEN 8 - -/* CAN FD payload length and DLC definitions according to ISO 11898-7 */ -#define CANFD_MAX_DLC 15 -#define CANFD_MAX_DLEN 64 - /** * struct can_frame - basic CAN frame structure - * @can_id: CAN ID of the frame and CAN_*_FLAG flags, see canid_t definition - * @can_dlc: frame payload length in byte (0 .. 8) aka data length code - * N.B. the DLC field from ISO 11898-1 Chapter 8.4.2.3 has a 1:1 - * mapping of the 'data length code' to the real payload length - * @data: CAN frame payload (up to 8 byte) + * @can_id: the CAN ID of the frame and CAN_*_FLAG flags, see above. + * @can_dlc: the data length field of the CAN frame + * @data: the CAN frame payload. */ struct can_frame { canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */ - __u8 can_dlc; /* frame payload length in byte (0 .. CAN_MAX_DLEN) */ - __u8 data[CAN_MAX_DLEN] __attribute__((aligned(8))); + __u8 can_dlc; /* data length code: 0 .. 8 */ + __u8 data[8] __attribute__((aligned(8))); }; -/* - * defined bits for canfd_frame.flags - * - * As the default for CAN FD should be to support the high data rate in the - * payload section of the frame (HDR) and to support up to 64 byte in the - * data section (EDL) the bits are only set in the non-default case. - * Btw. as long as there's no real implementation for CAN FD network driver - * these bits are only preliminary. - * - * RX: NOHDR/NOEDL - info about received CAN FD frame - * ESI - bit from originating CAN controller - * TX: NOHDR/NOEDL - control per-frame settings if supported by CAN controller - * ESI - bit is set by local CAN controller - */ -#define CANFD_NOHDR 0x01 /* frame without high data rate */ -#define CANFD_NOEDL 0x02 /* frame without extended data length */ -#define CANFD_ESI 0x04 /* error state indicator */ - -/** - * struct canfd_frame - CAN flexible data rate frame structure - * @can_id: CAN ID of the frame and CAN_*_FLAG flags, see canid_t definition - * @len: frame payload length in byte (0 .. 
CANFD_MAX_DLEN) - * @flags: additional flags for CAN FD - * @__res0: reserved / padding - * @__res1: reserved / padding - * @data: CAN FD frame payload (up to CANFD_MAX_DLEN byte) - */ -struct canfd_frame { - canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */ - __u8 len; /* frame payload length in byte */ - __u8 flags; /* additional flags for CAN FD */ - __u8 __res0; /* reserved / padding */ - __u8 __res1; /* reserved / padding */ - __u8 data[CANFD_MAX_DLEN] __attribute__((aligned(8))); -}; - -#define CAN_MTU (sizeof(struct can_frame)) -#define CANFD_MTU (sizeof(struct canfd_frame)) - /* particular protocols of the protocol family PF_CAN */ #define CAN_RAW 1 /* RAW sockets */ #define CAN_BCM 2 /* Broadcast Manager */ diff --git a/trunk/include/linux/can/core.h b/trunk/include/linux/can/core.h index 78c6c52073ad..0ccc1cd28b95 100644 --- a/trunk/include/linux/can/core.h +++ b/trunk/include/linux/can/core.h @@ -17,10 +17,10 @@ #include #include -#define CAN_VERSION "20120528" +#define CAN_VERSION "20090105" /* increment this number each time you change some user-space interface */ -#define CAN_ABI_VERSION "9" +#define CAN_ABI_VERSION "8" #define CAN_VERSION_STRING "rev " CAN_VERSION " abi " CAN_ABI_VERSION diff --git a/trunk/include/linux/can/dev.h b/trunk/include/linux/can/dev.h index ee5a771fb20d..5d2efe7e3f1b 100644 --- a/trunk/include/linux/can/dev.h +++ b/trunk/include/linux/can/dev.h @@ -61,40 +61,23 @@ struct can_priv { * To be used in the CAN netdriver receive path to ensure conformance with * ISO 11898-1 Chapter 8.4.2.3 (DLC field) */ -#define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC)) -#define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC)) +#define get_can_dlc(i) (min_t(__u8, (i), 8)) /* Drop a given socketbuffer if it does not contain a valid CAN frame. */ static inline int can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb) { - const struct canfd_frame *cfd = (struct canfd_frame *)skb->data; - - if (skb->protocol == htons(ETH_P_CAN)) { - if (unlikely(skb->len != CAN_MTU || - cfd->len > CAN_MAX_DLEN)) - goto inval_skb; - } else if (skb->protocol == htons(ETH_P_CANFD)) { - if (unlikely(skb->len != CANFD_MTU || - cfd->len > CANFD_MAX_DLEN)) - goto inval_skb; - } else - goto inval_skb; + const struct can_frame *cf = (struct can_frame *)skb->data; - return 0; + if (unlikely(skb->len != sizeof(*cf) || cf->can_dlc > 8)) { + kfree_skb(skb); + dev->stats.tx_dropped++; + return 1; + } -inval_skb: - kfree_skb(skb); - dev->stats.tx_dropped++; - return 1; + return 0; } -/* get data length from can_dlc with sanitized can_dlc */ -u8 can_dlc2len(u8 can_dlc); - -/* map the sanitized data length to an appropriate data length code */ -u8 can_len2dlc(u8 len); - struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max); void free_candev(struct net_device *dev); diff --git a/trunk/include/linux/can/raw.h b/trunk/include/linux/can/raw.h index a814062b0719..781f3a3701be 100644 --- a/trunk/include/linux/can/raw.h +++ b/trunk/include/linux/can/raw.h @@ -23,8 +23,7 @@ enum { CAN_RAW_FILTER = 1, /* set 0 .. 
n can_filter(s) */ CAN_RAW_ERR_FILTER, /* set filter for error frames */ CAN_RAW_LOOPBACK, /* local loopback (default:on) */ - CAN_RAW_RECV_OWN_MSGS, /* receive my own msgs (default:off) */ - CAN_RAW_FD_FRAMES, /* allow CAN FD frames (default:off) */ + CAN_RAW_RECV_OWN_MSGS /* receive my own msgs (default:off) */ }; #endif diff --git a/trunk/include/linux/if_ether.h b/trunk/include/linux/if_ether.h index 167ce5b363d2..56d907a2c804 100644 --- a/trunk/include/linux/if_ether.h +++ b/trunk/include/linux/if_ether.h @@ -105,8 +105,7 @@ #define ETH_P_WAN_PPP 0x0007 /* Dummy type for WAN PPP frames*/ #define ETH_P_PPP_MP 0x0008 /* Dummy type for PPP MP frames */ #define ETH_P_LOCALTALK 0x0009 /* Localtalk pseudo type */ -#define ETH_P_CAN 0x000C /* CAN: Controller Area Network */ -#define ETH_P_CANFD 0x000D /* CANFD: CAN flexible data rate*/ +#define ETH_P_CAN 0x000C /* Controller Area Network */ #define ETH_P_PPPTALK 0x0010 /* Dummy type for Atalk over PPP*/ #define ETH_P_TR_802_2 0x0011 /* 802.2 frames */ #define ETH_P_MOBITEX 0x0015 /* Mobitex (kaz@cafe.net) */ diff --git a/trunk/include/linux/if_team.h b/trunk/include/linux/if_team.h index c1938869191f..8185f57a9c7f 100644 --- a/trunk/include/linux/if_team.h +++ b/trunk/include/linux/if_team.h @@ -61,7 +61,6 @@ struct team_port { } orig; struct rcu_head rcu; - long mode_priv[0]; }; struct team_mode_ops { @@ -74,8 +73,6 @@ struct team_mode_ops { int (*port_enter)(struct team *team, struct team_port *port); void (*port_leave)(struct team *team, struct team_port *port); void (*port_change_mac)(struct team *team, struct team_port *port); - void (*port_enabled)(struct team *team, struct team_port *port); - void (*port_disabled)(struct team *team, struct team_port *port); }; enum team_option_type { @@ -85,11 +82,6 @@ enum team_option_type { TEAM_OPTION_TYPE_BOOL, }; -struct team_option_inst_info { - u32 array_index; - struct team_port *port; /* != NULL if per-port */ -}; - struct team_gsetter_ctx { union { u32 u32_val; @@ -100,28 +92,23 @@ struct team_gsetter_ctx { } bin_val; bool bool_val; } data; - struct team_option_inst_info *info; + struct team_port *port; }; struct team_option { struct list_head list; const char *name; bool per_port; - unsigned int array_size; /* != 0 means the option is array */ enum team_option_type type; - int (*init)(struct team *team, struct team_option_inst_info *info); int (*getter)(struct team *team, struct team_gsetter_ctx *ctx); int (*setter)(struct team *team, struct team_gsetter_ctx *ctx); }; -extern void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info); -extern void team_options_change_check(struct team *team); - struct team_mode { + struct list_head list; const char *kind; struct module *owner; size_t priv_size; - size_t port_priv_size; const struct team_mode_ops *ops; }; @@ -191,8 +178,8 @@ extern int team_options_register(struct team *team, extern void team_options_unregister(struct team *team, const struct team_option *option, size_t option_count); -extern int team_mode_register(const struct team_mode *mode); -extern void team_mode_unregister(const struct team_mode *mode); +extern int team_mode_register(struct team_mode *mode); +extern int team_mode_unregister(struct team_mode *mode); #endif /* __KERNEL__ */ @@ -254,7 +241,6 @@ enum { TEAM_ATTR_OPTION_DATA, /* dynamic */ TEAM_ATTR_OPTION_REMOVED, /* flag */ TEAM_ATTR_OPTION_PORT_IFINDEX, /* u32 */ /* for per-port options */ - TEAM_ATTR_OPTION_ARRAY_INDEX, /* u32 */ /* for array options */ __TEAM_ATTR_OPTION_MAX, TEAM_ATTR_OPTION_MAX = 
__TEAM_ATTR_OPTION_MAX - 1, diff --git a/trunk/include/linux/netfilter.h b/trunk/include/linux/netfilter.h index dca19e61b30a..38b96a54f9a5 100644 --- a/trunk/include/linux/netfilter.h +++ b/trunk/include/linux/netfilter.h @@ -404,7 +404,7 @@ struct nfq_ct_hook { void (*seq_adjust)(struct sk_buff *skb, struct nf_conn *ct, u32 ctinfo, int off); }; -extern struct nfq_ct_hook *nfq_ct_hook; +extern struct nfq_ct_hook __rcu *nfq_ct_hook; #else static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {} #endif diff --git a/trunk/include/net/bluetooth/a2mp.h b/trunk/include/net/bluetooth/a2mp.h deleted file mode 100644 index 6a76e0a0705e..000000000000 --- a/trunk/include/net/bluetooth/a2mp.h +++ /dev/null @@ -1,126 +0,0 @@ -/* - Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved. - Copyright (c) 2011,2012 Intel Corp. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License version 2 and - only version 2 as published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. -*/ - -#ifndef __A2MP_H -#define __A2MP_H - -#include - -#define A2MP_FEAT_EXT 0x8000 - -struct amp_mgr { - struct l2cap_conn *l2cap_conn; - struct l2cap_chan *a2mp_chan; - struct kref kref; - __u8 ident; - __u8 handle; - unsigned long flags; -}; - -struct a2mp_cmd { - __u8 code; - __u8 ident; - __le16 len; - __u8 data[0]; -} __packed; - -/* A2MP command codes */ -#define A2MP_COMMAND_REJ 0x01 -struct a2mp_cmd_rej { - __le16 reason; - __u8 data[0]; -} __packed; - -#define A2MP_DISCOVER_REQ 0x02 -struct a2mp_discov_req { - __le16 mtu; - __le16 ext_feat; -} __packed; - -struct a2mp_cl { - __u8 id; - __u8 type; - __u8 status; -} __packed; - -#define A2MP_DISCOVER_RSP 0x03 -struct a2mp_discov_rsp { - __le16 mtu; - __le16 ext_feat; - struct a2mp_cl cl[0]; -} __packed; - -#define A2MP_CHANGE_NOTIFY 0x04 -#define A2MP_CHANGE_RSP 0x05 - -#define A2MP_GETINFO_REQ 0x06 -struct a2mp_info_req { - __u8 id; -} __packed; - -#define A2MP_GETINFO_RSP 0x07 -struct a2mp_info_rsp { - __u8 id; - __u8 status; - __le32 total_bw; - __le32 max_bw; - __le32 min_latency; - __le16 pal_cap; - __le16 assoc_size; -} __packed; - -#define A2MP_GETAMPASSOC_REQ 0x08 -struct a2mp_amp_assoc_req { - __u8 id; -} __packed; - -#define A2MP_GETAMPASSOC_RSP 0x09 -struct a2mp_amp_assoc_rsp { - __u8 id; - __u8 status; - __u8 amp_assoc[0]; -} __packed; - -#define A2MP_CREATEPHYSLINK_REQ 0x0A -#define A2MP_DISCONNPHYSLINK_REQ 0x0C -struct a2mp_physlink_req { - __u8 local_id; - __u8 remote_id; - __u8 amp_assoc[0]; -} __packed; - -#define A2MP_CREATEPHYSLINK_RSP 0x0B -#define A2MP_DISCONNPHYSLINK_RSP 0x0D -struct a2mp_physlink_rsp { - __u8 local_id; - __u8 remote_id; - __u8 status; -} __packed; - -/* A2MP response status */ -#define A2MP_STATUS_SUCCESS 0x00 -#define A2MP_STATUS_INVALID_CTRL_ID 0x01 -#define A2MP_STATUS_UNABLE_START_LINK_CREATION 0x02 -#define A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS 0x02 -#define A2MP_STATUS_COLLISION_OCCURED 0x03 -#define A2MP_STATUS_DISCONN_REQ_RECVD 0x04 -#define A2MP_STATUS_PHYS_LINK_EXISTS 0x05 -#define A2MP_STATUS_SECURITY_VIOLATION 0x06 - -void amp_mgr_get(struct amp_mgr *mgr); -int amp_mgr_put(struct amp_mgr *mgr); -struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn, - struct sk_buff *skb); - -#endif /* __A2MP_H */ 
diff --git a/trunk/include/net/bluetooth/bluetooth.h b/trunk/include/net/bluetooth/bluetooth.h index 565d4bee1e49..961669b648fd 100644 --- a/trunk/include/net/bluetooth/bluetooth.h +++ b/trunk/include/net/bluetooth/bluetooth.h @@ -1,4 +1,4 @@ -/* +/* BlueZ - Bluetooth protocol stack for Linux Copyright (C) 2000-2001 Qualcomm Incorporated @@ -12,19 +12,22 @@ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY - CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, - COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ #ifndef __BLUETOOTH_H #define __BLUETOOTH_H +#include +#include +#include #include #include @@ -165,8 +168,8 @@ typedef struct { #define BDADDR_LE_PUBLIC 0x01 #define BDADDR_LE_RANDOM 0x02 -#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0} }) -#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff} }) +#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}}) +#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff}}) /* Copy, swap, convert BD Address */ static inline int bacmp(bdaddr_t *ba1, bdaddr_t *ba2) @@ -212,7 +215,7 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags); int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags); -uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait); +uint bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait); int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo); @@ -222,12 +225,12 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock); /* Skb helpers */ struct l2cap_ctrl { - unsigned int sframe:1, - poll:1, - final:1, - fcs:1, - sar:2, - super:2; + unsigned int sframe : 1, + poll : 1, + final : 1, + fcs : 1, + sar : 2, + super : 2; __u16 reqseq; __u16 txseq; __u8 retries; @@ -246,8 +249,7 @@ static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how) { struct sk_buff *skb; - skb = alloc_skb(len + BT_SKB_RESERVE, how); - if (skb) { + if ((skb = alloc_skb(len + BT_SKB_RESERVE, how))) { skb_reserve(skb, BT_SKB_RESERVE); bt_cb(skb)->incoming = 0; } @@ -259,8 +261,7 @@ static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk, { struct sk_buff *skb; - skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err); - if (skb) { + if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) { skb_reserve(skb, BT_SKB_RESERVE); bt_cb(skb)->incoming = 0; } diff --git a/trunk/include/net/bluetooth/hci.h b/trunk/include/net/bluetooth/hci.h index 2a6b0b8b7120..66a7b579e31c 100644 --- 
a/trunk/include/net/bluetooth/hci.h +++ b/trunk/include/net/bluetooth/hci.h @@ -30,9 +30,6 @@ #define HCI_MAX_EVENT_SIZE 260 #define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4) -#define HCI_LINK_KEY_SIZE 16 -#define HCI_AMP_LINK_KEY_SIZE (2 * HCI_LINK_KEY_SIZE) - /* HCI dev events */ #define HCI_DEV_REG 1 #define HCI_DEV_UNREG 2 @@ -59,12 +56,9 @@ #define HCI_BREDR 0x00 #define HCI_AMP 0x01 -/* First BR/EDR Controller shall have ID = 0 */ -#define HCI_BREDR_ID 0 - /* HCI device quirks */ enum { - HCI_QUIRK_RESET_ON_CLOSE, + HCI_QUIRK_NO_RESET, HCI_QUIRK_RAW_DEVICE, HCI_QUIRK_FIXUP_BUFFER_SIZE }; @@ -139,8 +133,10 @@ enum { #define HCIINQUIRY _IOR('H', 240, int) /* HCI timeouts */ +#define HCI_CONNECT_TIMEOUT (40000) /* 40 seconds */ #define HCI_DISCONN_TIMEOUT (2000) /* 2 seconds */ #define HCI_PAIRING_TIMEOUT (60000) /* 60 seconds */ +#define HCI_IDLE_TIMEOUT (6000) /* 6 seconds */ #define HCI_INIT_TIMEOUT (10000) /* 10 seconds */ #define HCI_CMD_TIMEOUT (1000) /* 1 seconds */ #define HCI_ACL_TX_TIMEOUT (45000) /* 45 seconds */ @@ -375,7 +371,7 @@ struct hci_cp_reject_conn_req { #define HCI_OP_LINK_KEY_REPLY 0x040b struct hci_cp_link_key_reply { bdaddr_t bdaddr; - __u8 link_key[HCI_LINK_KEY_SIZE]; + __u8 link_key[16]; } __packed; #define HCI_OP_LINK_KEY_NEG_REPLY 0x040c @@ -527,28 +523,6 @@ struct hci_cp_io_capability_neg_reply { __u8 reason; } __packed; -#define HCI_OP_CREATE_PHY_LINK 0x0435 -struct hci_cp_create_phy_link { - __u8 phy_handle; - __u8 key_len; - __u8 key_type; - __u8 key[HCI_AMP_LINK_KEY_SIZE]; -} __packed; - -#define HCI_OP_ACCEPT_PHY_LINK 0x0436 -struct hci_cp_accept_phy_link { - __u8 phy_handle; - __u8 key_len; - __u8 key_type; - __u8 key[HCI_AMP_LINK_KEY_SIZE]; -} __packed; - -#define HCI_OP_DISCONN_PHY_LINK 0x0437 -struct hci_cp_disconn_phy_link { - __u8 phy_handle; - __u8 reason; -} __packed; - #define HCI_OP_SNIFF_MODE 0x0803 struct hci_cp_sniff_mode { __le16 handle; @@ -844,31 +818,6 @@ struct hci_rp_read_local_amp_info { __le32 be_flush_to; } __packed; -#define HCI_OP_READ_LOCAL_AMP_ASSOC 0x140a -struct hci_cp_read_local_amp_assoc { - __u8 phy_handle; - __le16 len_so_far; - __le16 max_len; -} __packed; -struct hci_rp_read_local_amp_assoc { - __u8 status; - __u8 phy_handle; - __le16 rem_len; - __u8 frag[0]; -} __packed; - -#define HCI_OP_WRITE_REMOTE_AMP_ASSOC 0x140b -struct hci_cp_write_remote_amp_assoc { - __u8 phy_handle; - __le16 len_so_far; - __le16 rem_len; - __u8 frag[0]; -} __packed; -struct hci_rp_write_remote_amp_assoc { - __u8 status; - __u8 phy_handle; -} __packed; - #define HCI_OP_LE_SET_EVENT_MASK 0x2001 struct hci_cp_le_set_event_mask { __u8 mask[8]; @@ -1099,7 +1048,7 @@ struct hci_ev_link_key_req { #define HCI_EV_LINK_KEY_NOTIFY 0x18 struct hci_ev_link_key_notify { bdaddr_t bdaddr; - __u8 link_key[HCI_LINK_KEY_SIZE]; + __u8 link_key[16]; __u8 key_type; } __packed; @@ -1195,12 +1144,6 @@ struct extended_inquiry_info { __u8 data[240]; } __packed; -#define HCI_EV_KEY_REFRESH_COMPLETE 0x30 -struct hci_ev_key_refresh_complete { - __u8 status; - __le16 handle; -} __packed; - #define HCI_EV_IO_CAPA_REQUEST 0x31 struct hci_ev_io_capa_request { bdaddr_t bdaddr; @@ -1247,39 +1190,6 @@ struct hci_ev_le_meta { __u8 subevent; } __packed; -#define HCI_EV_PHY_LINK_COMPLETE 0x40 -struct hci_ev_phy_link_complete { - __u8 status; - __u8 phy_handle; -} __packed; - -#define HCI_EV_CHANNEL_SELECTED 0x41 -struct hci_ev_channel_selected { - __u8 phy_handle; -} __packed; - -#define HCI_EV_DISCONN_PHY_LINK_COMPLETE 0x42 -struct hci_ev_disconn_phy_link_complete { - __u8 
status; - __u8 phy_handle; - __u8 reason; -} __packed; - -#define HCI_EV_LOGICAL_LINK_COMPLETE 0x45 -struct hci_ev_logical_link_complete { - __u8 status; - __le16 handle; - __u8 phy_handle; - __u8 flow_spec_id; -} __packed; - -#define HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE 0x46 -struct hci_ev_disconn_logical_link_complete { - __u8 status; - __le16 handle; - __u8 reason; -} __packed; - #define HCI_EV_NUM_COMP_BLOCKS 0x48 struct hci_comp_blocks_info { __le16 handle; @@ -1380,6 +1290,7 @@ struct hci_sco_hdr { __u8 dlen; } __packed; +#include static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb) { return (struct hci_event_hdr *) skb->data; @@ -1396,12 +1307,12 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb) } /* Command opcode pack/unpack */ -#define hci_opcode_pack(ogf, ocf) ((__u16) ((ocf & 0x03ff)|(ogf << 10))) +#define hci_opcode_pack(ogf, ocf) (__u16) ((ocf & 0x03ff)|(ogf << 10)) #define hci_opcode_ogf(op) (op >> 10) #define hci_opcode_ocf(op) (op & 0x03ff) /* ACL handle and flags pack/unpack */ -#define hci_handle_pack(h, f) ((__u16) ((h & 0x0fff)|(f << 12))) +#define hci_handle_pack(h, f) (__u16) ((h & 0x0fff)|(f << 12)) #define hci_handle(h) (h & 0x0fff) #define hci_flags(h) (h >> 12) diff --git a/trunk/include/net/bluetooth/hci_core.h b/trunk/include/net/bluetooth/hci_core.h index 20fd57367ddc..9fc7728f94e4 100644 --- a/trunk/include/net/bluetooth/hci_core.h +++ b/trunk/include/net/bluetooth/hci_core.h @@ -25,6 +25,7 @@ #ifndef __HCI_CORE_H #define __HCI_CORE_H +#include #include /* HCI priority */ @@ -64,7 +65,7 @@ struct discovery_state { DISCOVERY_RESOLVING, DISCOVERY_STOPPING, } state; - struct list_head all; /* All devices found during inquiry */ + struct list_head all; /* All devices found during inquiry */ struct list_head unknown; /* Name state not known */ struct list_head resolve; /* Name needs to be resolved */ __u32 timestamp; @@ -104,7 +105,7 @@ struct link_key { struct list_head list; bdaddr_t bdaddr; u8 type; - u8 val[HCI_LINK_KEY_SIZE]; + u8 val[16]; u8 pin_len; }; @@ -332,7 +333,6 @@ struct hci_conn { void *l2cap_data; void *sco_data; void *smp_conn; - struct amp_mgr *amp_mgr; struct hci_conn *link; @@ -360,8 +360,7 @@ extern int l2cap_connect_cfm(struct hci_conn *hcon, u8 status); extern int l2cap_disconn_ind(struct hci_conn *hcon); extern int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason); extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt); -extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, - u16 flags); +extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags); extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr); extern int sco_connect_cfm(struct hci_conn *hcon, __u8 status); @@ -430,8 +429,8 @@ enum { static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; - return test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) && - test_bit(HCI_CONN_SSP_ENABLED, &conn->flags); + return (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) && + test_bit(HCI_CONN_SSP_ENABLED, &conn->flags)); } static inline void hci_conn_hash_init(struct hci_dev *hdev) @@ -641,19 +640,6 @@ static inline void hci_set_drvdata(struct hci_dev *hdev, void *data) dev_set_drvdata(&hdev->dev, data); } -/* hci_dev_list shall be locked */ -static inline uint8_t __hci_num_ctrl(void) -{ - uint8_t count = 0; - struct list_head *p; - - list_for_each(p, &hci_dev_list) { - count++; - } - - return count; -} - struct hci_dev *hci_dev_get(int 
index); struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst); @@ -675,8 +661,7 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg); int hci_get_auth_info(struct hci_dev *hdev, void __user *arg); int hci_inquiry(void __user *arg); -struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, - bdaddr_t *bdaddr); +struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr); int hci_blacklist_clear(struct hci_dev *hdev); int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); diff --git a/trunk/include/net/bluetooth/l2cap.h b/trunk/include/net/bluetooth/l2cap.h index d80e3f0691b4..1c7d1cd5e679 100644 --- a/trunk/include/net/bluetooth/l2cap.h +++ b/trunk/include/net/bluetooth/l2cap.h @@ -40,11 +40,11 @@ #define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */ #define L2CAP_DEFAULT_MAX_PDU_SIZE 1009 /* Sized for 3-DH5 packet */ #define L2CAP_DEFAULT_ACK_TO 200 +#define L2CAP_LE_DEFAULT_MTU 23 #define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF #define L2CAP_DEFAULT_SDU_ITIME 0xFFFFFFFF #define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF #define L2CAP_BREDR_MAX_PAYLOAD 1019 /* 3-DH5 packet */ -#define L2CAP_LE_MIN_MTU 23 #define L2CAP_DISC_TIMEOUT msecs_to_jiffies(100) #define L2CAP_DISC_REJ_TIMEOUT msecs_to_jiffies(5000) @@ -52,8 +52,6 @@ #define L2CAP_CONN_TIMEOUT msecs_to_jiffies(40000) #define L2CAP_INFO_TIMEOUT msecs_to_jiffies(4000) -#define L2CAP_A2MP_DEFAULT_MTU 670 - /* L2CAP socket address */ struct sockaddr_l2 { sa_family_t l2_family; @@ -231,14 +229,9 @@ struct l2cap_conn_rsp { __le16 status; } __packed; -/* protocol/service multiplexer (PSM) */ -#define L2CAP_PSM_SDP 0x0001 -#define L2CAP_PSM_RFCOMM 0x0003 - /* channel indentifier */ #define L2CAP_CID_SIGNALING 0x0001 #define L2CAP_CID_CONN_LESS 0x0002 -#define L2CAP_CID_A2MP 0x0003 #define L2CAP_CID_LE_DATA 0x0004 #define L2CAP_CID_LE_SIGNALING 0x0005 #define L2CAP_CID_SMP 0x0006 @@ -278,9 +271,6 @@ struct l2cap_conf_rsp { #define L2CAP_CONF_PENDING 0x0004 #define L2CAP_CONF_EFS_REJECT 0x0005 -/* configuration req/rsp continuation flag */ -#define L2CAP_CONF_FLAG_CONTINUATION 0x0001 - struct l2cap_conf_opt { __u8 type; __u8 len; @@ -429,6 +419,11 @@ struct l2cap_seq_list { #define L2CAP_SEQ_LIST_CLEAR 0xFFFF #define L2CAP_SEQ_LIST_TAIL 0x8000 +struct srej_list { + __u16 tx_seq; + struct list_head list; +}; + struct l2cap_chan { struct sock *sk; @@ -480,12 +475,14 @@ struct l2cap_chan { __u16 expected_ack_seq; __u16 expected_tx_seq; __u16 buffer_seq; + __u16 buffer_seq_srej; __u16 srej_save_reqseq; __u16 last_acked_seq; __u16 frames_sent; __u16 unacked_frames; __u8 retry_count; __u16 srej_queue_next; + __u8 num_acked; __u16 sdu_len; struct sk_buff *sdu; struct sk_buff *sdu_last_frag; @@ -518,6 +515,7 @@ struct l2cap_chan { struct sk_buff_head srej_q; struct l2cap_seq_list srej_list; struct l2cap_seq_list retrans_list; + struct list_head srej_l; struct list_head list; struct list_head global_l; @@ -530,14 +528,10 @@ struct l2cap_chan { struct l2cap_ops { char *name; - struct l2cap_chan *(*new_connection) (struct l2cap_chan *chan); - int (*recv) (struct l2cap_chan * chan, - struct sk_buff *skb); - void (*teardown) (struct l2cap_chan *chan, int err); - void (*close) (struct l2cap_chan *chan); - void (*state_change) (struct l2cap_chan *chan, - int state); - void (*ready) (struct l2cap_chan *chan); + struct l2cap_chan *(*new_connection) (void *data); + int (*recv) (void *data, struct sk_buff *skb); + void (*close) (void *data); + 
void (*state_change) (void *data, int state); struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan, unsigned long len, int nb); }; @@ -581,7 +575,6 @@ struct l2cap_conn { #define L2CAP_CHAN_RAW 1 #define L2CAP_CHAN_CONN_LESS 2 #define L2CAP_CHAN_CONN_ORIENTED 3 -#define L2CAP_CHAN_CONN_FIX_A2MP 4 /* ----- L2CAP socket info ----- */ #define l2cap_pi(sk) ((struct l2cap_pinfo *) sk) @@ -604,7 +597,6 @@ enum { CONF_EWS_RECV, CONF_LOC_CONF_PEND, CONF_REM_CONF_PEND, - CONF_NOT_COMPLETE, }; #define L2CAP_CONF_MAX_CONF_REQ 2 @@ -721,7 +713,11 @@ static inline bool l2cap_clear_timer(struct l2cap_chan *chan, #define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t)) #define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer) +#define __set_retrans_timer(c) l2cap_set_timer(c, &c->retrans_timer, \ + msecs_to_jiffies(L2CAP_DEFAULT_RETRANS_TO)); #define __clear_retrans_timer(c) l2cap_clear_timer(c, &c->retrans_timer) +#define __set_monitor_timer(c) l2cap_set_timer(c, &c->monitor_timer, \ + msecs_to_jiffies(L2CAP_DEFAULT_MONITOR_TO)); #define __clear_monitor_timer(c) l2cap_clear_timer(c, &c->monitor_timer) #define __set_ack_timer(c) l2cap_set_timer(c, &chan->ack_timer, \ msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO)); @@ -740,17 +736,173 @@ static inline __u16 __next_seq(struct l2cap_chan *chan, __u16 seq) return (seq + 1) % (chan->tx_win_max + 1); } -static inline struct l2cap_chan *l2cap_chan_no_new_connection(struct l2cap_chan *chan) +static inline int l2cap_tx_window_full(struct l2cap_chan *ch) +{ + int sub; + + sub = (ch->next_tx_seq - ch->expected_ack_seq) % 64; + + if (sub < 0) + sub += 64; + + return sub == ch->remote_tx_win; +} + +static inline __u16 __get_reqseq(struct l2cap_chan *chan, __u32 ctrl) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (ctrl & L2CAP_EXT_CTRL_REQSEQ) >> + L2CAP_EXT_CTRL_REQSEQ_SHIFT; + else + return (ctrl & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT; +} + +static inline __u32 __set_reqseq(struct l2cap_chan *chan, __u32 reqseq) { - return NULL; + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) & + L2CAP_EXT_CTRL_REQSEQ; + else + return (reqseq << L2CAP_CTRL_REQSEQ_SHIFT) & L2CAP_CTRL_REQSEQ; } -static inline void l2cap_chan_no_teardown(struct l2cap_chan *chan, int err) +static inline __u16 __get_txseq(struct l2cap_chan *chan, __u32 ctrl) { + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (ctrl & L2CAP_EXT_CTRL_TXSEQ) >> + L2CAP_EXT_CTRL_TXSEQ_SHIFT; + else + return (ctrl & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT; } -static inline void l2cap_chan_no_ready(struct l2cap_chan *chan) +static inline __u32 __set_txseq(struct l2cap_chan *chan, __u32 txseq) { + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) & + L2CAP_EXT_CTRL_TXSEQ; + else + return (txseq << L2CAP_CTRL_TXSEQ_SHIFT) & L2CAP_CTRL_TXSEQ; +} + +static inline bool __is_sframe(struct l2cap_chan *chan, __u32 ctrl) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return ctrl & L2CAP_EXT_CTRL_FRAME_TYPE; + else + return ctrl & L2CAP_CTRL_FRAME_TYPE; +} + +static inline __u32 __set_sframe(struct l2cap_chan *chan) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return L2CAP_EXT_CTRL_FRAME_TYPE; + else + return L2CAP_CTRL_FRAME_TYPE; +} + +static inline __u8 __get_ctrl_sar(struct l2cap_chan *chan, __u32 ctrl) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (ctrl & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT; + else + return (ctrl & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT; +} + +static 
inline __u32 __set_ctrl_sar(struct l2cap_chan *chan, __u32 sar) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (sar << L2CAP_EXT_CTRL_SAR_SHIFT) & L2CAP_EXT_CTRL_SAR; + else + return (sar << L2CAP_CTRL_SAR_SHIFT) & L2CAP_CTRL_SAR; +} + +static inline bool __is_sar_start(struct l2cap_chan *chan, __u32 ctrl) +{ + return __get_ctrl_sar(chan, ctrl) == L2CAP_SAR_START; +} + +static inline __u32 __get_sar_mask(struct l2cap_chan *chan) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return L2CAP_EXT_CTRL_SAR; + else + return L2CAP_CTRL_SAR; +} + +static inline __u8 __get_ctrl_super(struct l2cap_chan *chan, __u32 ctrl) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (ctrl & L2CAP_EXT_CTRL_SUPERVISE) >> + L2CAP_EXT_CTRL_SUPER_SHIFT; + else + return (ctrl & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT; +} + +static inline __u32 __set_ctrl_super(struct l2cap_chan *chan, __u32 super) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (super << L2CAP_EXT_CTRL_SUPER_SHIFT) & + L2CAP_EXT_CTRL_SUPERVISE; + else + return (super << L2CAP_CTRL_SUPER_SHIFT) & + L2CAP_CTRL_SUPERVISE; +} + +static inline __u32 __set_ctrl_final(struct l2cap_chan *chan) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return L2CAP_EXT_CTRL_FINAL; + else + return L2CAP_CTRL_FINAL; +} + +static inline bool __is_ctrl_final(struct l2cap_chan *chan, __u32 ctrl) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return ctrl & L2CAP_EXT_CTRL_FINAL; + else + return ctrl & L2CAP_CTRL_FINAL; +} + +static inline __u32 __set_ctrl_poll(struct l2cap_chan *chan) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return L2CAP_EXT_CTRL_POLL; + else + return L2CAP_CTRL_POLL; +} + +static inline bool __is_ctrl_poll(struct l2cap_chan *chan, __u32 ctrl) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return ctrl & L2CAP_EXT_CTRL_POLL; + else + return ctrl & L2CAP_CTRL_POLL; +} + +static inline __u32 __get_control(struct l2cap_chan *chan, void *p) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return get_unaligned_le32(p); + else + return get_unaligned_le16(p); +} + +static inline void __put_control(struct l2cap_chan *chan, __u32 control, + void *p) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return put_unaligned_le32(control, p); + else + return put_unaligned_le16(control, p); +} + +static inline __u8 __ctrl_size(struct l2cap_chan *chan) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return L2CAP_EXT_HDR_SIZE - L2CAP_HDR_SIZE; + else + return L2CAP_ENH_HDR_SIZE - L2CAP_HDR_SIZE; } extern bool disable_ertm; @@ -774,8 +926,5 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, void l2cap_chan_busy(struct l2cap_chan *chan, int busy); int l2cap_chan_check_security(struct l2cap_chan *chan); void l2cap_chan_set_defaults(struct l2cap_chan *chan); -int l2cap_ertm_init(struct l2cap_chan *chan); -void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan); -void l2cap_chan_del(struct l2cap_chan *chan, int err); #endif /* __L2CAP_H */ diff --git a/trunk/include/net/flow.h b/trunk/include/net/flow.h index bd524f598561..6c469dbdb917 100644 --- a/trunk/include/net/flow.h +++ b/trunk/include/net/flow.h @@ -22,7 +22,6 @@ struct flowi_common { #define FLOWI_FLAG_ANYSRC 0x01 #define FLOWI_FLAG_PRECOW_METRICS 0x02 #define FLOWI_FLAG_CAN_SLEEP 0x04 -#define FLOWI_FLAG_RT_NOCACHE 0x08 __u32 flowic_secid; }; diff --git a/trunk/include/net/inet_connection_sock.h b/trunk/include/net/inet_connection_sock.h index af3c743a40e4..e1b7734c456f 100644 --- a/trunk/include/net/inet_connection_sock.h +++ 
b/trunk/include/net/inet_connection_sock.h @@ -251,8 +251,7 @@ extern int inet_csk_get_port(struct sock *sk, unsigned short snum); extern struct dst_entry* inet_csk_route_req(struct sock *sk, struct flowi4 *fl4, - const struct request_sock *req, - bool nocache); + const struct request_sock *req); extern struct dst_entry* inet_csk_route_child_sock(struct sock *sk, struct sock *newsk, const struct request_sock *req); diff --git a/trunk/include/net/inet_hashtables.h b/trunk/include/net/inet_hashtables.h index 54be0287eb98..808fc5f76b03 100644 --- a/trunk/include/net/inet_hashtables.h +++ b/trunk/include/net/inet_hashtables.h @@ -379,10 +379,10 @@ static inline struct sock *__inet_lookup_skb(struct inet_hashinfo *hashinfo, const __be16 sport, const __be16 dport) { - struct sock *sk = skb_steal_sock(skb); + struct sock *sk; const struct iphdr *iph = ip_hdr(skb); - if (sk) + if (unlikely(sk = skb_steal_sock(skb))) return sk; else return __inet_lookup(dev_net(skb_dst(skb)->dev), hashinfo, diff --git a/trunk/include/net/ip.h b/trunk/include/net/ip.h index 50841bd6f10e..83e0619f59d0 100644 --- a/trunk/include/net/ip.h +++ b/trunk/include/net/ip.h @@ -210,9 +210,6 @@ extern int inet_peer_threshold; extern int inet_peer_minttl; extern int inet_peer_maxttl; -/* From ip_input.c */ -extern int sysctl_ip_early_demux; - /* From ip_output.c */ extern int sysctl_ip_dynaddr; diff --git a/trunk/include/net/mac80211.h b/trunk/include/net/mac80211.h index 6914f9978aea..d152f54064fd 100644 --- a/trunk/include/net/mac80211.h +++ b/trunk/include/net/mac80211.h @@ -1945,11 +1945,6 @@ enum ieee80211_rate_control_changed { * to also unregister the device. If it returns 1, then mac80211 * will also go through the regular complete restart on resume. * - * @set_wakeup: Enable or disable wakeup when WoWLAN configuration is - * modified. The reason is that device_set_wakeup_enable() is - * supposed to be called when the configuration changes, not only - * in suspend(). - * * @add_interface: Called when a netdevice attached to the hardware is * enabled. Because it is not called for monitor mode devices, @start * and @stop must be implemented. @@ -2979,7 +2974,6 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, * ieee80211_generic_frame_duration - Calculate the duration field for a frame * @hw: pointer obtained from ieee80211_alloc_hw(). * @vif: &struct ieee80211_vif pointer from the add_interface callback. - * @band: the band to calculate the frame duration on * @frame_len: the length of the frame. * @rate: the rate at which the frame is going to be transmitted. * diff --git a/trunk/include/net/protocol.h b/trunk/include/net/protocol.h index 967b926cbfb1..875f4895b033 100644 --- a/trunk/include/net/protocol.h +++ b/trunk/include/net/protocol.h @@ -29,15 +29,11 @@ #include #endif -/* This is one larger than the largest protocol value that can be - * found in an ipv4 or ipv6 header. Since in both cases the protocol - * value is presented in a __u8, this is defined to be 256. - */ -#define MAX_INET_PROTOS 256 +#define MAX_INET_PROTOS 256 /* Must be a power of 2 */ + /* This is used to register protocols. 
*/ struct net_protocol { - int (*early_demux)(struct sk_buff *skb); int (*handler)(struct sk_buff *skb); void (*err_handler)(struct sk_buff *skb, u32 info); int (*gso_send_check)(struct sk_buff *skb); diff --git a/trunk/include/net/sock.h b/trunk/include/net/sock.h index 87b424ae750a..4a4521699563 100644 --- a/trunk/include/net/sock.h +++ b/trunk/include/net/sock.h @@ -319,7 +319,6 @@ struct sock { unsigned long sk_flags; struct dst_entry *sk_dst_cache; spinlock_t sk_dst_lock; - struct dst_entry *sk_rx_dst; atomic_t sk_wmem_alloc; atomic_t sk_omem_alloc; int sk_sndbuf; @@ -1427,7 +1426,6 @@ extern struct sk_buff *sock_rmalloc(struct sock *sk, gfp_t priority); extern void sock_wfree(struct sk_buff *skb); extern void sock_rfree(struct sk_buff *skb); -extern void sock_edemux(struct sk_buff *skb); extern int sock_setsockopt(struct socket *sock, int level, int op, char __user *optval, diff --git a/trunk/include/net/tcp.h b/trunk/include/net/tcp.h index 6660ffc4963d..9332f342259a 100644 --- a/trunk/include/net/tcp.h +++ b/trunk/include/net/tcp.h @@ -325,7 +325,6 @@ extern void tcp_v4_err(struct sk_buff *skb, u32); extern void tcp_shutdown (struct sock *sk, int how); -extern int tcp_v4_early_demux(struct sk_buff *skb); extern int tcp_v4_rcv(struct sk_buff *skb); extern struct inet_peer *tcp_v4_get_peer(struct sock *sk); diff --git a/trunk/net/batman-adv/bat_algo.h b/trunk/net/batman-adv/bat_algo.h index a0ba3bff9b36..9852a688ba43 100644 --- a/trunk/net/batman-adv/bat_algo.h +++ b/trunk/net/batman-adv/bat_algo.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2011-2012 B.A.T.M.A.N. contributors: * * Marek Lindner * @@ -15,11 +16,12 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #ifndef _NET_BATMAN_ADV_BAT_ALGO_H_ #define _NET_BATMAN_ADV_BAT_ALGO_H_ -int batadv_iv_init(void); +int bat_iv_init(void); #endif /* _NET_BATMAN_ADV_BAT_ALGO_H_ */ diff --git a/trunk/net/batman-adv/bat_debugfs.c b/trunk/net/batman-adv/bat_debugfs.c index 4001c57a25e4..db8273c26989 100644 --- a/trunk/net/batman-adv/bat_debugfs.c +++ b/trunk/net/batman-adv/bat_debugfs.c @@ -1,4 +1,5 @@ -/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors: * * Marek Lindner * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #include "main.h" @@ -74,7 +76,7 @@ static int fdebug_log(struct debug_log *debug_log, const char *fmt, ...) return 0; } -int batadv_debug_log(struct bat_priv *bat_priv, const char *fmt, ...) +int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) 
{ va_list args; char tmp_log_buf[256]; @@ -92,13 +94,13 @@ static int log_open(struct inode *inode, struct file *file) { nonseekable_open(inode, file); file->private_data = inode->i_private; - batadv_inc_module_count(); + inc_module_count(); return 0; } static int log_release(struct inode *inode, struct file *file) { - batadv_dec_module_count(); + dec_module_count(); return 0; } @@ -222,46 +224,45 @@ static void debug_log_cleanup(struct bat_priv *bat_priv) static int bat_algorithms_open(struct inode *inode, struct file *file) { - return single_open(file, batadv_algo_seq_print_text, NULL); + return single_open(file, bat_algo_seq_print_text, NULL); } static int originators_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; - return single_open(file, batadv_orig_seq_print_text, net_dev); + return single_open(file, orig_seq_print_text, net_dev); } static int gateways_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; - return single_open(file, batadv_gw_client_seq_print_text, net_dev); + return single_open(file, gw_client_seq_print_text, net_dev); } static int transtable_global_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; - return single_open(file, batadv_tt_global_seq_print_text, net_dev); + return single_open(file, tt_global_seq_print_text, net_dev); } #ifdef CONFIG_BATMAN_ADV_BLA static int bla_claim_table_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; - return single_open(file, batadv_bla_claim_table_seq_print_text, - net_dev); + return single_open(file, bla_claim_table_seq_print_text, net_dev); } #endif static int transtable_local_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; - return single_open(file, batadv_tt_local_seq_print_text, net_dev); + return single_open(file, tt_local_seq_print_text, net_dev); } static int vis_data_open(struct inode *inode, struct file *file) { struct net_device *net_dev = (struct net_device *)inode->i_private; - return single_open(file, batadv_vis_seq_print_text, net_dev); + return single_open(file, vis_seq_print_text, net_dev); } struct bat_debuginfo { @@ -303,7 +304,7 @@ static struct bat_debuginfo *mesh_debuginfos[] = { NULL, }; -void batadv_debugfs_init(void) +void debugfs_init(void) { struct bat_debuginfo *bat_debug; struct dentry *file; @@ -326,7 +327,7 @@ void batadv_debugfs_init(void) return; } -void batadv_debugfs_destroy(void) +void debugfs_destroy(void) { if (bat_debugfs) { debugfs_remove_recursive(bat_debugfs); @@ -334,7 +335,7 @@ void batadv_debugfs_destroy(void) } } -int batadv_debugfs_add_meshif(struct net_device *dev) +int debugfs_add_meshif(struct net_device *dev) { struct bat_priv *bat_priv = netdev_priv(dev); struct bat_debuginfo **bat_debug; @@ -347,7 +348,7 @@ int batadv_debugfs_add_meshif(struct net_device *dev) if (!bat_priv->debug_dir) goto out; - if (batadv_socket_setup(bat_priv) < 0) + if (bat_socket_setup(bat_priv) < 0) goto rem_attr; if (debug_log_setup(bat_priv) < 0) @@ -377,7 +378,7 @@ int batadv_debugfs_add_meshif(struct net_device *dev) #endif /* CONFIG_DEBUG_FS */ } -void batadv_debugfs_del_meshif(struct net_device *dev) +void debugfs_del_meshif(struct net_device *dev) { struct bat_priv *bat_priv = netdev_priv(dev); diff --git a/trunk/net/batman-adv/bat_debugfs.h b/trunk/net/batman-adv/bat_debugfs.h 
index eb0d576b4f9d..d605c6746428 100644 --- a/trunk/net/batman-adv/bat_debugfs.h +++ b/trunk/net/batman-adv/bat_debugfs.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors: * * Marek Lindner * @@ -15,16 +16,18 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ + #ifndef _NET_BATMAN_ADV_DEBUGFS_H_ #define _NET_BATMAN_ADV_DEBUGFS_H_ #define DEBUGFS_BAT_SUBDIR "batman_adv" -void batadv_debugfs_init(void); -void batadv_debugfs_destroy(void); -int batadv_debugfs_add_meshif(struct net_device *dev); -void batadv_debugfs_del_meshif(struct net_device *dev); +void debugfs_init(void); +void debugfs_destroy(void); +int debugfs_add_meshif(struct net_device *dev); +void debugfs_del_meshif(struct net_device *dev); #endif /* _NET_BATMAN_ADV_DEBUGFS_H_ */ diff --git a/trunk/net/batman-adv/bat_iv_ogm.c b/trunk/net/batman-adv/bat_iv_ogm.c index 94859d45ed6e..6e0859f4a6a9 100644 --- a/trunk/net/batman-adv/bat_iv_ogm.c +++ b/trunk/net/batman-adv/bat_iv_ogm.c @@ -1,4 +1,5 @@ -/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #include "main.h" @@ -136,10 +138,7 @@ static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv) static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len, int tt_num_changes) { - int next_buff_pos = 0; - - next_buff_pos += buff_pos + BATMAN_OGM_HLEN; - next_buff_pos += batadv_tt_len(tt_num_changes); + int next_buff_pos = buff_pos + BATMAN_OGM_HLEN + tt_len(tt_num_changes); return (next_buff_pos <= packet_len) && (next_buff_pos <= MAX_AGGREGATION_BYTES); @@ -168,8 +167,7 @@ static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet, batman_ogm_packet->tt_num_changes)) { /* we might have aggregated direct link packets with an - * ordinary base packet - */ + * ordinary base packet */ if ((forw_packet->direct_link_flags & (1 << packet_num)) && (forw_packet->if_incoming == hard_iface)) batman_ogm_packet->flags |= DIRECTLINK; @@ -190,8 +188,8 @@ static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet, batman_ogm_packet->ttvn, hard_iface->net_dev->name, hard_iface->net_dev->dev_addr); - buff_pos += BATMAN_OGM_HLEN; - buff_pos += batadv_tt_len(batman_ogm_packet->tt_num_changes); + buff_pos += BATMAN_OGM_HLEN + + tt_len(batman_ogm_packet->tt_num_changes); packet_num++; batman_ogm_packet = (struct batman_ogm_packet *) (forw_packet->skb->data + buff_pos); @@ -203,7 +201,7 @@ static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet, batadv_inc_counter(bat_priv, BAT_CNT_MGMT_TX); batadv_add_counter(bat_priv, BAT_CNT_MGMT_TX_BYTES, skb->len + ETH_HLEN); - batadv_send_skb_packet(skb, hard_iface, batadv_broadcast_addr); + send_skb_packet(skb, hard_iface, broadcast_addr); } } @@ -236,9 +234,8 @@ static void bat_iv_ogm_emit(struct forw_packet *forw_packet) if (!primary_if) goto out; - /* multihomed peer assumed - * non-primary OGMs are only broadcasted on their interface - */ + /* multihomed peer assumed */ + /* non-primary OGMs are only broadcasted on their interface */ if ((directlink && (batman_ogm_packet->header.ttl == 1)) || (forw_packet->own && (forw_packet->if_incoming != primary_if))) { @@ -253,9 +250,8 @@ 
static void bat_iv_ogm_emit(struct forw_packet *forw_packet) forw_packet->if_incoming->net_dev->dev_addr); /* skb is only used once and than forw_packet is free'd */ - batadv_send_skb_packet(forw_packet->skb, - forw_packet->if_incoming, - batadv_broadcast_addr); + send_skb_packet(forw_packet->skb, forw_packet->if_incoming, + broadcast_addr); forw_packet->skb = NULL; goto out; @@ -263,7 +259,7 @@ static void bat_iv_ogm_emit(struct forw_packet *forw_packet) /* broadcast on every interface */ rcu_read_lock(); - list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { if (hard_iface->soft_iface != soft_iface) continue; @@ -292,39 +288,41 @@ static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data; - /* we can aggregate the current packet to this aggregated packet + /** + * we can aggregate the current packet to this aggregated packet * if: * * - the send time is within our MAX_AGGREGATION_MS time * - the resulting packet wont be bigger than * MAX_AGGREGATION_BYTES */ + if (time_before(send_time, forw_packet->send_time) && time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS), forw_packet->send_time) && (aggregated_bytes <= MAX_AGGREGATION_BYTES)) { - /* check aggregation compatibility + /** + * check aggregation compatibility * -> direct link packets are broadcasted on * their interface only * -> aggregate packet if the current packet is * a "global" packet as well as the base * packet */ + primary_if = primary_if_get_selected(bat_priv); if (!primary_if) goto out; /* packets without direct link flag and high TTL - * are flooded through the net - */ + * are flooded through the net */ if ((!directlink) && (!(batman_ogm_packet->flags & DIRECTLINK)) && (batman_ogm_packet->header.ttl != 1) && /* own packets originating non-primary - * interfaces leave only that interface - */ + * interfaces leave only that interface */ ((!forw_packet->own) || (forw_packet->if_incoming == primary_if))) { res = true; @@ -332,16 +330,14 @@ static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet } /* if the incoming packet is sent via this one - * interface only - we still can aggregate - */ + * interface only - we still can aggregate */ if ((directlink) && (new_batman_ogm_packet->header.ttl == 1) && (forw_packet->if_incoming == if_incoming) && /* packets from direct neighbors or * own secondary interface packets - * (= secondary interface packets in general) - */ + * (= secondary interface packets in general) */ (batman_ogm_packet->flags & DIRECTLINK || (forw_packet->own && forw_packet->if_incoming != primary_if))) { @@ -424,8 +420,8 @@ static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff, /* start timer for this packet */ INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work, - batadv_send_outstanding_bat_ogm_packet); - queue_delayed_work(batadv_event_workqueue, + send_outstanding_bat_ogm_packet); + queue_delayed_work(bat_event_workqueue, &forw_packet_aggr->delayed_work, send_time - jiffies); @@ -457,7 +453,8 @@ static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv, int packet_len, struct hard_iface *if_incoming, int own_packet, unsigned long send_time) { - /* _aggr -> pointer to the packet we want to aggregate with + /** + * _aggr -> pointer to the packet we want to aggregate with * _pos -> pointer to the position in the queue */ struct forw_packet *forw_packet_aggr = NULL, *forw_packet_pos = NULL; @@ -486,13 +483,13 @@ static 
void bat_iv_ogm_queue_add(struct bat_priv *bat_priv, } /* nothing to aggregate with - either aggregation disabled or no - * suitable aggregation packet found - */ + * suitable aggregation packet found */ if (!forw_packet_aggr) { /* the following section can run without the lock */ spin_unlock_bh(&bat_priv->forw_bat_list_lock); - /* if we could not aggregate this packet with one of the others + /** + * if we could not aggregate this packet with one of the others * we hold it back for a while, so that it might be aggregated * later on */ @@ -558,7 +555,7 @@ static void bat_iv_ogm_forward(struct orig_node *orig_node, batman_ogm_packet->flags &= ~DIRECTLINK; bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet, - BATMAN_OGM_HLEN + batadv_tt_len(tt_num_changes), + BATMAN_OGM_HLEN + tt_len(tt_num_changes), if_incoming, 0, bat_iv_ogm_fwd_send_time()); } @@ -602,7 +599,7 @@ static void bat_iv_ogm_schedule(struct hard_iface *hard_iface) else batman_ogm_packet->gw_flags = NO_FLAGS; - batadv_slide_own_bcast_window(hard_iface); + slide_own_bcast_window(hard_iface); bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff, hard_iface->packet_len, hard_iface, 1, bat_iv_ogm_emit_send_time(bat_priv)); @@ -636,7 +633,7 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, (tmp_neigh_node->if_incoming == if_incoming) && atomic_inc_not_zero(&tmp_neigh_node->refcount)) { if (neigh_node) - batadv_neigh_node_free_ref(neigh_node); + neigh_node_free_ref(neigh_node); neigh_node = tmp_neigh_node; continue; } @@ -645,17 +642,17 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, continue; spin_lock_bh(&tmp_neigh_node->lq_update_lock); - batadv_ring_buffer_set(tmp_neigh_node->tq_recv, - &tmp_neigh_node->tq_index, 0); + ring_buffer_set(tmp_neigh_node->tq_recv, + &tmp_neigh_node->tq_index, 0); tmp_neigh_node->tq_avg = - batadv_ring_buffer_avg(tmp_neigh_node->tq_recv); + ring_buffer_avg(tmp_neigh_node->tq_recv); spin_unlock_bh(&tmp_neigh_node->lq_update_lock); } if (!neigh_node) { struct orig_node *orig_tmp; - orig_tmp = batadv_get_orig_node(bat_priv, ethhdr->h_source); + orig_tmp = get_orig_node(bat_priv, ethhdr->h_source); if (!orig_tmp) goto unlock; @@ -663,7 +660,7 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, orig_node, orig_tmp, batman_ogm_packet->seqno); - batadv_orig_node_free_ref(orig_tmp); + orig_node_free_ref(orig_tmp); if (!neigh_node) goto unlock; } else @@ -676,10 +673,10 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, neigh_node->last_seen = jiffies; spin_lock_bh(&neigh_node->lq_update_lock); - batadv_ring_buffer_set(neigh_node->tq_recv, - &neigh_node->tq_index, - batman_ogm_packet->tq); - neigh_node->tq_avg = batadv_ring_buffer_avg(neigh_node->tq_recv); + ring_buffer_set(neigh_node->tq_recv, + &neigh_node->tq_index, + batman_ogm_packet->tq); + neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv); spin_unlock_bh(&neigh_node->lq_update_lock); if (!is_duplicate) { @@ -687,12 +684,11 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, neigh_node->last_ttl = batman_ogm_packet->header.ttl; } - batadv_bonding_candidate_add(orig_node, neigh_node); + bonding_candidate_add(orig_node, neigh_node); /* if this neighbor already is our next hop there is nothing - * to change - */ - router = batadv_orig_node_get_router(orig_node); + * to change */ + router = orig_node_get_router(orig_node); if (router == neigh_node) goto update_tt; @@ -701,8 +697,7 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, goto update_tt; /* 
if the TQ is the same and the link not more symmetric we - * won't consider it either - */ + * won't consider it either */ if (router && (neigh_node->tq_avg == router->tq_avg)) { orig_node_tmp = router->orig_node; spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); @@ -720,23 +715,22 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, goto update_tt; } - batadv_update_route(bat_priv, orig_node, neigh_node); + update_route(bat_priv, orig_node, neigh_node); update_tt: /* I have to check for transtable changes only if the OGM has been - * sent through a primary interface - */ + * sent through a primary interface */ if (((batman_ogm_packet->orig != ethhdr->h_source) && (batman_ogm_packet->header.ttl > 2)) || (batman_ogm_packet->flags & PRIMARIES_FIRST_HOP)) - batadv_tt_update_orig(bat_priv, orig_node, tt_buff, - batman_ogm_packet->tt_num_changes, - batman_ogm_packet->ttvn, - ntohs(batman_ogm_packet->tt_crc)); + tt_update_orig(bat_priv, orig_node, tt_buff, + batman_ogm_packet->tt_num_changes, + batman_ogm_packet->ttvn, + ntohs(batman_ogm_packet->tt_crc)); if (orig_node->gw_flags != batman_ogm_packet->gw_flags) - batadv_gw_node_update(bat_priv, orig_node, - batman_ogm_packet->gw_flags); + gw_node_update(bat_priv, orig_node, + batman_ogm_packet->gw_flags); orig_node->gw_flags = batman_ogm_packet->gw_flags; @@ -744,7 +738,7 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, if ((orig_node->gw_flags) && (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) && (atomic_read(&bat_priv->gw_sel_class) > 2)) - batadv_gw_check_election(bat_priv, orig_node); + gw_check_election(bat_priv, orig_node); goto out; @@ -752,9 +746,9 @@ static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv, rcu_read_unlock(); out: if (neigh_node) - batadv_neigh_node_free_ref(neigh_node); + neigh_node_free_ref(neigh_node); if (router) - batadv_neigh_node_free_ref(router); + neigh_node_free_ref(router); } static int bat_iv_ogm_calc_tq(struct orig_node *orig_node, @@ -814,17 +808,15 @@ static int bat_iv_ogm_calc_tq(struct orig_node *orig_node, total_count = (orig_eq_count > neigh_rq_count ? 
neigh_rq_count : orig_eq_count); - /* if we have too few packets (too less data) we set tq_own to zero - * if we receive too few packets it is not considered bidirectional - */ + /* if we have too few packets (too less data) we set tq_own to zero */ + /* if we receive too few packets it is not considered bidirectional */ if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) || (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM)) tq_own = 0; else /* neigh_node->real_packet_count is never zero as we * only purge old information when getting new - * information - */ + * information */ tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count; /* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does @@ -850,14 +842,13 @@ static int bat_iv_ogm_calc_tq(struct orig_node *orig_node, neigh_rq_count, tq_own, tq_asym_penalty, batman_ogm_packet->tq); /* if link has the minimum required transmission quality - * consider it bidirectional - */ + * consider it bidirectional */ if (batman_ogm_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT) ret = 1; out: if (neigh_node) - batadv_neigh_node_free_ref(neigh_node); + neigh_node_free_ref(neigh_node); return ret; } @@ -884,7 +875,7 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, int set_mark, ret = -1; uint32_t seqno = ntohl(batman_ogm_packet->seqno); - orig_node = batadv_get_orig_node(bat_priv, batman_ogm_packet->orig); + orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig); if (!orig_node) return 0; @@ -893,8 +884,8 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, /* signalize caller that the packet is to be dropped. */ if (!hlist_empty(&orig_node->neigh_list) && - batadv_window_protected(bat_priv, seq_diff, - &orig_node->batman_seqno_reset)) + window_protected(bat_priv, seq_diff, + &orig_node->batman_seqno_reset)) goto out; rcu_read_lock(); @@ -912,9 +903,9 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, set_mark = 0; /* if the window moved, set the update flag. 
*/ - need_update |= batadv_bit_get_packet(bat_priv, - tmp_neigh_node->real_bits, - seq_diff, set_mark); + need_update |= bit_get_packet(bat_priv, + tmp_neigh_node->real_bits, + seq_diff, set_mark); tmp_neigh_node->real_packet_count = bitmap_weight(tmp_neigh_node->real_bits, @@ -933,7 +924,7 @@ static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr, out: spin_unlock_bh(&orig_node->ogm_cnt_lock); - batadv_orig_node_free_ref(orig_node); + orig_node_free_ref(orig_node); return ret; } @@ -989,7 +980,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, batman_ogm_packet->header.version, has_directlink_flag); rcu_read_lock(); - list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { if (hard_iface->if_status != IF_ACTIVE) continue; @@ -1038,15 +1029,13 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, unsigned long *word; int offset; - orig_neigh_node = batadv_get_orig_node(bat_priv, - ethhdr->h_source); + orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source); if (!orig_neigh_node) return; /* neighbor has to indicate direct link and it has to - * come via the corresponding interface - * save packet seqno for bidirectional check - */ + * come via the corresponding interface */ + /* save packet seqno for bidirectional check */ if (has_directlink_flag && compare_eth(if_incoming->net_dev->dev_addr, batman_ogm_packet->orig)) { @@ -1064,7 +1053,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: originator packet from myself (via neighbor)\n"); - batadv_orig_node_free_ref(orig_neigh_node); + orig_node_free_ref(orig_neigh_node); return; } @@ -1082,7 +1071,7 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, return; } - orig_node = batadv_get_orig_node(bat_priv, batman_ogm_packet->orig); + orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig); if (!orig_node) return; @@ -1102,9 +1091,9 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, goto out; } - router = batadv_orig_node_get_router(orig_node); + router = orig_node_get_router(orig_node); if (router) - router_router = batadv_orig_node_get_router(router->orig_node); + router_router = orig_node_get_router(router->orig_node); if ((router && router->tq_avg != 0) && (compare_eth(router->addr, ethhdr->h_source))) @@ -1123,19 +1112,17 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, } /* if sender is a direct neighbor the sender mac equals - * originator mac - */ + * originator mac */ orig_neigh_node = (is_single_hop_neigh ? 
orig_node : - batadv_get_orig_node(bat_priv, ethhdr->h_source)); + get_orig_node(bat_priv, ethhdr->h_source)); if (!orig_neigh_node) goto out; - orig_neigh_router = batadv_orig_node_get_router(orig_neigh_node); + orig_neigh_router = orig_node_get_router(orig_neigh_node); /* drop packet if sender is not a direct neighbor and if we - * don't route towards it - */ + * don't route towards it */ if (!is_single_hop_neigh && (!orig_neigh_router)) { bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: OGM via unknown neighbor!\n"); @@ -1145,12 +1132,10 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, is_bidirectional = bat_iv_ogm_calc_tq(orig_node, orig_neigh_node, batman_ogm_packet, if_incoming); - batadv_bonding_save_primary(orig_node, orig_neigh_node, - batman_ogm_packet); + bonding_save_primary(orig_node, orig_neigh_node, batman_ogm_packet); /* update ranking if it is not a duplicate or has the same - * seqno and similar ttl as the non-duplicate - */ + * seqno and similar ttl as the non-duplicate */ if (is_bidirectional && (!is_duplicate || ((orig_node->last_real_seqno == ntohl(batman_ogm_packet->seqno)) && @@ -1193,16 +1178,16 @@ static void bat_iv_ogm_process(const struct ethhdr *ethhdr, out_neigh: if ((orig_neigh_node) && (!is_single_hop_neigh)) - batadv_orig_node_free_ref(orig_neigh_node); + orig_node_free_ref(orig_neigh_node); out: if (router) - batadv_neigh_node_free_ref(router); + neigh_node_free_ref(router); if (router_router) - batadv_neigh_node_free_ref(router_router); + neigh_node_free_ref(router_router); if (orig_neigh_router) - batadv_neigh_node_free_ref(orig_neigh_router); + neigh_node_free_ref(orig_neigh_router); - batadv_orig_node_free_ref(orig_node); + orig_node_free_ref(orig_node); } static int bat_iv_ogm_receive(struct sk_buff *skb, @@ -1215,7 +1200,7 @@ static int bat_iv_ogm_receive(struct sk_buff *skb, unsigned char *tt_buff, *packet_buff; bool ret; - ret = batadv_check_management_packet(skb, if_incoming, BATMAN_OGM_HLEN); + ret = check_management_packet(skb, if_incoming, BATMAN_OGM_HLEN); if (!ret) return NET_RX_DROP; @@ -1241,8 +1226,8 @@ static int bat_iv_ogm_receive(struct sk_buff *skb, bat_iv_ogm_process(ethhdr, batman_ogm_packet, tt_buff, if_incoming); - buff_pos += BATMAN_OGM_HLEN; - buff_pos += batadv_tt_len(batman_ogm_packet->tt_num_changes); + buff_pos += BATMAN_OGM_HLEN + + tt_len(batman_ogm_packet->tt_num_changes); batman_ogm_packet = (struct batman_ogm_packet *) (packet_buff + buff_pos); @@ -1263,23 +1248,23 @@ static struct bat_algo_ops batman_iv __read_mostly = { .bat_ogm_emit = bat_iv_ogm_emit, }; -int __init batadv_iv_init(void) +int __init bat_iv_init(void) { int ret; /* batman originator packet */ - ret = batadv_recv_handler_register(BAT_IV_OGM, bat_iv_ogm_receive); + ret = recv_handler_register(BAT_IV_OGM, bat_iv_ogm_receive); if (ret < 0) goto out; - ret = batadv_algo_register(&batman_iv); + ret = bat_algo_register(&batman_iv); if (ret < 0) goto handler_unregister; goto out; handler_unregister: - batadv_recv_handler_unregister(BAT_IV_OGM); + recv_handler_unregister(BAT_IV_OGM); out: return ret; } diff --git a/trunk/net/batman-adv/bat_sysfs.c b/trunk/net/batman-adv/bat_sysfs.c index 8196fa6ff22e..dc1edbee63df 100644 --- a/trunk/net/batman-adv/bat_sysfs.c +++ b/trunk/net/batman-adv/bat_sysfs.c @@ -1,4 +1,5 @@ -/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2010-2012 B.A.T.M.A.N. 
contributors: * * Marek Lindner * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #include "main.h" @@ -82,8 +84,7 @@ ssize_t show_##_name(struct kobject *kobj, \ } \ /* Use this, if you are going to turn a [name] in the soft-interface - * (bat_priv) on or off - */ + * (bat_priv) on or off */ #define BAT_ATTR_SIF_BOOL(_name, _mode, _post_func) \ static BAT_ATTR_SIF_STORE_BOOL(_name, _post_func) \ static BAT_ATTR_SIF_SHOW_BOOL(_name) \ @@ -109,8 +110,7 @@ ssize_t show_##_name(struct kobject *kobj, \ } \ /* Use this, if you are going to set [name] in the soft-interface - * (bat_priv) to an unsigned integer value - */ + * (bat_priv) to an unsigned integer value */ #define BAT_ATTR_SIF_UINT(_name, _mode, _min, _max, _post_func) \ static BAT_ATTR_SIF_STORE_UINT(_name, _min, _max, _post_func) \ static BAT_ATTR_SIF_SHOW_UINT(_name) \ @@ -122,10 +122,9 @@ ssize_t store_##_name(struct kobject *kobj, struct attribute *attr, \ char *buff, size_t count) \ { \ struct net_device *net_dev = kobj_to_netdev(kobj); \ - struct hard_iface *hard_iface; \ + struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \ ssize_t length; \ \ - hard_iface = batadv_hardif_get_by_netdev(net_dev); \ if (!hard_iface) \ return 0; \ \ @@ -141,10 +140,9 @@ ssize_t show_##_name(struct kobject *kobj, \ struct attribute *attr, char *buff) \ { \ struct net_device *net_dev = kobj_to_netdev(kobj); \ - struct hard_iface *hard_iface; \ + struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); \ ssize_t length; \ \ - hard_iface = batadv_hardif_get_by_netdev(net_dev); \ if (!hard_iface) \ return 0; \ \ @@ -155,8 +153,7 @@ ssize_t show_##_name(struct kobject *kobj, \ } /* Use this, if you are going to set [name] in hard_iface to an - * unsigned integer value - */ + * unsigned integer value*/ #define BAT_ATTR_HIF_UINT(_name, _mode, _min, _max, _post_func) \ static BAT_ATTR_HIF_STORE_UINT(_name, _min, _max, _post_func) \ static BAT_ATTR_HIF_SHOW_UINT(_name) \ @@ -329,7 +326,7 @@ static ssize_t show_bat_algo(struct kobject *kobj, struct attribute *attr, static void post_gw_deselect(struct net_device *net_dev) { struct bat_priv *bat_priv = netdev_priv(net_dev); - batadv_gw_deselect(bat_priv); + gw_deselect(bat_priv); } static ssize_t show_gw_mode(struct kobject *kobj, struct attribute *attr, @@ -400,7 +397,7 @@ static ssize_t store_gw_mode(struct kobject *kobj, struct attribute *attr, bat_info(net_dev, "Changing gw mode from: %s to: %s\n", curr_gw_mode_str, buff); - batadv_gw_deselect(bat_priv); + gw_deselect(bat_priv); atomic_set(&bat_priv->gw_mode, (unsigned int)gw_mode_tmp); return count; } @@ -412,7 +409,7 @@ static ssize_t show_gw_bwidth(struct kobject *kobj, struct attribute *attr, int down, up; int gw_bandwidth = atomic_read(&bat_priv->gw_bandwidth); - batadv_gw_bandwidth_to_kbit(gw_bandwidth, &down, &up); + gw_bandwidth_to_kbit(gw_bandwidth, &down, &up); return sprintf(buff, "%i%s/%i%s\n", (down > 2048 ? down / 1024 : down), (down > 2048 ? 
"MBit" : "KBit"), @@ -428,7 +425,7 @@ static ssize_t store_gw_bwidth(struct kobject *kobj, struct attribute *attr, if (buff[count - 1] == '\n') buff[count - 1] = '\0'; - return batadv_gw_bandwidth_set(net_dev, buff, count); + return gw_bandwidth_set(net_dev, buff, count); } BAT_ATTR_SIF_BOOL(aggregated_ogms, S_IRUGO | S_IWUSR, NULL); @@ -436,7 +433,7 @@ BAT_ATTR_SIF_BOOL(bonding, S_IRUGO | S_IWUSR, NULL); #ifdef CONFIG_BATMAN_ADV_BLA BAT_ATTR_SIF_BOOL(bridge_loop_avoidance, S_IRUGO | S_IWUSR, NULL); #endif -BAT_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, batadv_update_min_mtu); +BAT_ATTR_SIF_BOOL(fragmentation, S_IRUGO | S_IWUSR, update_min_mtu); BAT_ATTR_SIF_BOOL(ap_isolation, S_IRUGO | S_IWUSR, NULL); static BAT_ATTR(vis_mode, S_IRUGO | S_IWUSR, show_vis_mode, store_vis_mode); static BAT_ATTR(routing_algo, S_IRUGO, show_bat_algo, NULL); @@ -472,7 +469,7 @@ static struct bat_attribute *mesh_attrs[] = { NULL, }; -int batadv_sysfs_add_meshif(struct net_device *dev) +int sysfs_add_meshif(struct net_device *dev) { struct kobject *batif_kobject = &dev->dev.kobj; struct bat_priv *bat_priv = netdev_priv(dev); @@ -510,7 +507,7 @@ int batadv_sysfs_add_meshif(struct net_device *dev) return -ENOMEM; } -void batadv_sysfs_del_meshif(struct net_device *dev) +void sysfs_del_meshif(struct net_device *dev) { struct bat_priv *bat_priv = netdev_priv(dev); struct bat_attribute **bat_attr; @@ -526,7 +523,7 @@ static ssize_t show_mesh_iface(struct kobject *kobj, struct attribute *attr, char *buff) { struct net_device *net_dev = kobj_to_netdev(kobj); - struct hard_iface *hard_iface = batadv_hardif_get_by_netdev(net_dev); + struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); ssize_t length; if (!hard_iface) @@ -544,7 +541,7 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr, char *buff, size_t count) { struct net_device *net_dev = kobj_to_netdev(kobj); - struct hard_iface *hard_iface = batadv_hardif_get_by_netdev(net_dev); + struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); int status_tmp = -1; int ret = count; @@ -579,15 +576,15 @@ static ssize_t store_mesh_iface(struct kobject *kobj, struct attribute *attr, } if (status_tmp == IF_NOT_IN_USE) { - batadv_hardif_disable_interface(hard_iface); + hardif_disable_interface(hard_iface); goto unlock; } /* if the interface already is in use */ if (hard_iface->if_status != IF_NOT_IN_USE) - batadv_hardif_disable_interface(hard_iface); + hardif_disable_interface(hard_iface); - ret = batadv_hardif_enable_interface(hard_iface, buff); + ret = hardif_enable_interface(hard_iface, buff); unlock: rtnl_unlock(); @@ -600,7 +597,7 @@ static ssize_t show_iface_status(struct kobject *kobj, struct attribute *attr, char *buff) { struct net_device *net_dev = kobj_to_netdev(kobj); - struct hard_iface *hard_iface = batadv_hardif_get_by_netdev(net_dev); + struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); ssize_t length; if (!hard_iface) @@ -640,7 +637,7 @@ static struct bat_attribute *batman_attrs[] = { NULL, }; -int batadv_sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev) +int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev) { struct kobject *hardif_kobject = &dev->dev.kobj; struct bat_attribute **bat_attr; @@ -674,14 +671,14 @@ int batadv_sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev) return -ENOMEM; } -void batadv_sysfs_del_hardif(struct kobject **hardif_obj) +void sysfs_del_hardif(struct kobject **hardif_obj) { kobject_put(*hardif_obj); *hardif_obj = 
NULL; } -int batadv_throw_uevent(struct bat_priv *bat_priv, enum uev_type type, - enum uev_action action, const char *data) +int throw_uevent(struct bat_priv *bat_priv, enum uev_type type, + enum uev_action action, const char *data) { int ret = -ENOMEM; struct hard_iface *primary_if = NULL; diff --git a/trunk/net/batman-adv/bat_sysfs.h b/trunk/net/batman-adv/bat_sysfs.h index 367227707d52..fece77ae586e 100644 --- a/trunk/net/batman-adv/bat_sysfs.h +++ b/trunk/net/batman-adv/bat_sysfs.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2010-2012 B.A.T.M.A.N. contributors: * * Marek Lindner * @@ -15,8 +16,10 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ + #ifndef _NET_BATMAN_ADV_SYSFS_H_ #define _NET_BATMAN_ADV_SYSFS_H_ @@ -31,12 +34,11 @@ struct bat_attribute { char *buf, size_t count); }; -int batadv_sysfs_add_meshif(struct net_device *dev); -void batadv_sysfs_del_meshif(struct net_device *dev); -int batadv_sysfs_add_hardif(struct kobject **hardif_obj, - struct net_device *dev); -void batadv_sysfs_del_hardif(struct kobject **hardif_obj); -int batadv_throw_uevent(struct bat_priv *bat_priv, enum uev_type type, - enum uev_action action, const char *data); +int sysfs_add_meshif(struct net_device *dev); +void sysfs_del_meshif(struct net_device *dev); +int sysfs_add_hardif(struct kobject **hardif_obj, struct net_device *dev); +void sysfs_del_hardif(struct kobject **hardif_obj); +int throw_uevent(struct bat_priv *bat_priv, enum uev_type type, + enum uev_action action, const char *data); #endif /* _NET_BATMAN_ADV_SYSFS_H_ */ diff --git a/trunk/net/batman-adv/bitarray.c b/trunk/net/batman-adv/bitarray.c index 838abbc73c6c..07ae6e1b8aca 100644 --- a/trunk/net/batman-adv/bitarray.c +++ b/trunk/net/batman-adv/bitarray.c @@ -1,4 +1,5 @@ -/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors: * * Simon Wunderlich, Marek Lindner * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #include "main.h" @@ -23,7 +25,7 @@ #include /* shift the packet array by n places. */ -static void batadv_bitmap_shift_left(unsigned long *seq_bits, int32_t n) +static void bat_bitmap_shift_left(unsigned long *seq_bits, int32_t n) { if (n <= 0 || n >= TQ_LOCAL_WINDOW_SIZE) return; @@ -38,14 +40,14 @@ static void batadv_bitmap_shift_left(unsigned long *seq_bits, int32_t n) * 1 if the window was moved (either new or very old) * 0 if the window was not moved/shifted. */ -int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, - int32_t seq_num_diff, int set_mark) +int bit_get_packet(void *priv, unsigned long *seq_bits, + int32_t seq_num_diff, int set_mark) { struct bat_priv *bat_priv = priv; /* sequence number is slightly older. We already got a sequence number - * higher than this one, so we just mark it. - */ + * higher than this one, so we just mark it. 
*/ + if ((seq_num_diff <= 0) && (seq_num_diff > -TQ_LOCAL_WINDOW_SIZE)) { if (set_mark) bat_set_bit(seq_bits, -seq_num_diff); @@ -53,10 +55,10 @@ int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, } /* sequence number is slightly newer, so we shift the window and - * set the mark if required - */ + * set the mark if required */ + if ((seq_num_diff > 0) && (seq_num_diff < TQ_LOCAL_WINDOW_SIZE)) { - batadv_bitmap_shift_left(seq_bits, seq_num_diff); + bat_bitmap_shift_left(seq_bits, seq_num_diff); if (set_mark) bat_set_bit(seq_bits, 0); @@ -64,6 +66,7 @@ int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, } /* sequence number is much newer, probably missed a lot of packets */ + if ((seq_num_diff >= TQ_LOCAL_WINDOW_SIZE) && (seq_num_diff < EXPECTED_SEQNO_RANGE)) { bat_dbg(DBG_BATMAN, bat_priv, @@ -78,8 +81,8 @@ int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, /* received a much older packet. The other host either restarted * or the old packet got delayed somewhere in the network. The * packet should be dropped without calling this function if the - * seqno window is protected. - */ + * seqno window is protected. */ + if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) { diff --git a/trunk/net/batman-adv/bitarray.h b/trunk/net/batman-adv/bitarray.h index 8ab542632343..1835c15cda41 100644 --- a/trunk/net/batman-adv/bitarray.h +++ b/trunk/net/batman-adv/bitarray.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors: * * Simon Wunderlich, Marek Lindner * @@ -15,14 +16,14 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #ifndef _NET_BATMAN_ADV_BITARRAY_H_ #define _NET_BATMAN_ADV_BITARRAY_H_ /* returns true if the corresponding bit in the given seq_bits indicates true - * and curr_seqno is within range of last_seqno - */ + * and curr_seqno is within range of last_seqno */ static inline int bat_test_bit(const unsigned long *seq_bits, uint32_t last_seqno, uint32_t curr_seqno) { @@ -46,9 +47,8 @@ static inline void bat_set_bit(unsigned long *seq_bits, int32_t n) } /* receive and process one packet, returns 1 if received seq_num is considered - * new, 0 if old - */ -int batadv_bit_get_packet(void *priv, unsigned long *seq_bits, - int32_t seq_num_diff, int set_mark); + * new, 0 if old */ +int bit_get_packet(void *priv, unsigned long *seq_bits, + int32_t seq_num_diff, int set_mark); #endif /* _NET_BATMAN_ADV_BITARRAY_H_ */ diff --git a/trunk/net/batman-adv/bridge_loop_avoidance.c b/trunk/net/batman-adv/bridge_loop_avoidance.c index 52c0d637d581..314e37b272a7 100644 --- a/trunk/net/batman-adv/bridge_loop_avoidance.c +++ b/trunk/net/batman-adv/bridge_loop_avoidance.c @@ -1,4 +1,5 @@ -/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2011-2012 B.A.T.M.A.N. 
contributors: * * Simon Wunderlich * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #include "main.h" @@ -121,7 +123,8 @@ static void claim_free_ref(struct claim *claim) call_rcu(&claim->rcu, claim_free_rcu); } -/* @bat_priv: the bat priv with all the soft interface information +/** + * @bat_priv: the bat priv with all the soft interface information * @data: search data (may be local/static data) * * looks for a claim in the hash, and returns it if found @@ -159,7 +162,8 @@ static struct claim *claim_hash_find(struct bat_priv *bat_priv, return claim_tmp; } -/* @bat_priv: the bat priv with all the soft interface information +/** + * @bat_priv: the bat priv with all the soft interface information * @addr: the address of the originator * @vid: the VLAN ID * @@ -237,7 +241,8 @@ static void bla_del_backbone_claims(struct backbone_gw *backbone_gw) backbone_gw->crc = BLA_CRC_INIT; } -/* @bat_priv: the bat priv with all the soft interface information +/** + * @bat_priv: the bat priv with all the soft interface information * @orig: the mac address to be announced within the claim * @vid: the VLAN ID * @claimtype: the type of the claim (CLAIM, UNCLAIM, ANNOUNCE, ...) @@ -342,7 +347,8 @@ static void bla_send_claim(struct bat_priv *bat_priv, uint8_t *mac, hardif_free_ref(primary_if); } -/* @bat_priv: the bat priv with all the soft interface information +/** + * @bat_priv: the bat priv with all the soft interface information * @orig: the mac address of the originator * @vid: the VLAN ID * @@ -391,9 +397,9 @@ static struct backbone_gw *bla_get_backbone_gw(struct bat_priv *bat_priv, /* this is a gateway now, remove any tt entries */ orig_node = orig_hash_find(bat_priv, orig); if (orig_node) { - batadv_tt_global_del_orig(bat_priv, orig_node, - "became a backbone gateway"); - batadv_orig_node_free_ref(orig_node); + tt_global_del_orig(bat_priv, orig_node, + "became a backbone gateway"); + orig_node_free_ref(orig_node); } return entry; } @@ -416,7 +422,8 @@ static void bla_update_own_backbone_gw(struct bat_priv *bat_priv, backbone_gw_free_ref(backbone_gw); } -/* @bat_priv: the bat priv with all the soft interface information +/** + * @bat_priv: the bat priv with all the soft interface information * @vid: the vid where the request came on * * Repeat all of our own claims, and finally send an ANNOUNCE frame @@ -461,7 +468,8 @@ static void bla_answer_request(struct bat_priv *bat_priv, backbone_gw_free_ref(backbone_gw); } -/* @backbone_gw: the backbone gateway from whom we are out of sync +/** + * @backbone_gw: the backbone gateway from whom we are out of sync * * When the crc is wrong, ask the backbone gateway for a full table update. * After the request, it will repeat all of his own claims and finally @@ -487,7 +495,8 @@ static void bla_send_request(struct backbone_gw *backbone_gw) } } -/* @bat_priv: the bat priv with all the soft interface information +/** + * @bat_priv: the bat priv with all the soft interface information * @backbone_gw: our backbone gateway which should be announced * * This function sends an announcement. 
It is called from multiple @@ -507,7 +516,8 @@ static void bla_send_announce(struct bat_priv *bat_priv, } -/* @bat_priv: the bat priv with all the soft interface information +/** + * @bat_priv: the bat priv with all the soft interface information * @mac: the mac address of the claim * @vid: the VLAN ID of the frame * @backbone_gw: the backbone gateway which claims it @@ -721,7 +731,8 @@ static int handle_claim(struct bat_priv *bat_priv, return 1; } -/* @bat_priv: the bat priv with all the soft interface information +/** + * @bat_priv: the bat priv with all the soft interface information * @hw_src: the Hardware source in the ARP Header * @hw_dst: the Hardware destination in the ARP Header * @ethhdr: pointer to the Ethernet header of the claim frame @@ -793,13 +804,14 @@ static int check_claim_group(struct bat_priv *bat_priv, bla_dst_own->group = bla_dst->group; } - batadv_orig_node_free_ref(orig_node); + orig_node_free_ref(orig_node); return 2; } -/* @bat_priv: the bat priv with all the soft interface information +/** + * @bat_priv: the bat priv with all the soft interface information * @skb: the frame to be checked * * Check if this is a claim frame, and process it accordingly. @@ -848,6 +860,7 @@ static int bla_process_claim(struct bat_priv *bat_priv, /* Check whether the ARP frame carries a valid * IP information */ + if (arphdr->ar_hrd != htons(ARPHRD_ETHER)) return 0; if (arphdr->ar_pro != htons(ETH_P_IP)) @@ -950,7 +963,8 @@ static void bla_purge_backbone_gw(struct bat_priv *bat_priv, int now) } } -/* @bat_priv: the bat priv with all the soft interface information +/** + * @bat_priv: the bat priv with all the soft interface information * @primary_if: the selected primary interface, may be NULL if now is set * @now: whether the whole hash shall be wiped now * @@ -997,15 +1011,17 @@ static void bla_purge_claims(struct bat_priv *bat_priv, } } -/* @bat_priv: the bat priv with all the soft interface information +/** + * @bat_priv: the bat priv with all the soft interface information * @primary_if: the new selected primary_if * @oldif: the old primary interface, may be NULL * * Update the backbone gateways when the own orig address changes. 
+ * */ -void batadv_bla_update_orig_address(struct bat_priv *bat_priv, - struct hard_iface *primary_if, - struct hard_iface *oldif) +void bla_update_orig_address(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + struct hard_iface *oldif) { struct backbone_gw *backbone_gw; struct hlist_node *node; @@ -1054,7 +1070,7 @@ void batadv_bla_update_orig_address(struct bat_priv *bat_priv, static void bla_start_timer(struct bat_priv *bat_priv) { INIT_DELAYED_WORK(&bat_priv->bla_work, bla_periodic_work); - queue_delayed_work(batadv_event_workqueue, &bat_priv->bla_work, + queue_delayed_work(bat_event_workqueue, &bat_priv->bla_work, msecs_to_jiffies(BLA_PERIOD_LENGTH)); } @@ -1120,7 +1136,7 @@ static struct lock_class_key claim_hash_lock_class_key; static struct lock_class_key backbone_hash_lock_class_key; /* initialize all bla structures */ -int batadv_bla_init(struct bat_priv *bat_priv) +int bla_init(struct bat_priv *bat_priv) { int i; uint8_t claim_dest[ETH_ALEN] = {0xff, 0x43, 0x05, 0x00, 0x00, 0x00}; @@ -1150,8 +1166,8 @@ int batadv_bla_init(struct bat_priv *bat_priv) if (bat_priv->claim_hash) return 0; - bat_priv->claim_hash = batadv_hash_new(128); - bat_priv->backbone_hash = batadv_hash_new(32); + bat_priv->claim_hash = hash_new(128); + bat_priv->backbone_hash = hash_new(32); if (!bat_priv->claim_hash || !bat_priv->backbone_hash) return -ENOMEM; @@ -1167,7 +1183,8 @@ int batadv_bla_init(struct bat_priv *bat_priv) return 0; } -/* @bat_priv: the bat priv with all the soft interface information +/** + * @bat_priv: the bat priv with all the soft interface information * @bcast_packet: originator mac address * @hdr_size: maximum length of the frame * @@ -1179,10 +1196,12 @@ int batadv_bla_init(struct bat_priv *bat_priv) * with a good chance that it is the same packet. If it is furthermore * sent by another host, drop it. We allow equal packets from * the same host however as this might be intended. - */ -int batadv_bla_check_bcast_duplist(struct bat_priv *bat_priv, - struct bcast_packet *bcast_packet, - int hdr_size) + * + **/ + +int bla_check_bcast_duplist(struct bat_priv *bat_priv, + struct bcast_packet *bcast_packet, + int hdr_size) { int i, length, curr; uint8_t *content; @@ -1231,14 +1250,17 @@ int batadv_bla_check_bcast_duplist(struct bat_priv *bat_priv, -/* @bat_priv: the bat priv with all the soft interface information +/** + * @bat_priv: the bat priv with all the soft interface information * @orig: originator mac address * * check if the originator is a gateway for any VLAN ID. * * returns 1 if it is found, 0 otherwise + * */ -int batadv_bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig) + +int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig) { struct hashtable_t *hash = bat_priv->backbone_hash; struct hlist_head *head; @@ -1269,16 +1291,18 @@ int batadv_bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig) } -/* @skb: the frame to be checked +/** + * @skb: the frame to be checked * @orig_node: the orig_node of the frame * @hdr_size: maximum length of the frame * * bla_is_backbone_gw inspects the skb for the VLAN ID and returns 1 * if the orig_node is also a gateway on the soft interface, otherwise it * returns 0. 
+ * */ -int batadv_bla_is_backbone_gw(struct sk_buff *skb, - struct orig_node *orig_node, int hdr_size) +int bla_is_backbone_gw(struct sk_buff *skb, + struct orig_node *orig_node, int hdr_size) { struct ethhdr *ethhdr; struct vlan_ethhdr *vhdr; @@ -1304,6 +1328,7 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb, } /* see if this originator is a backbone gw for this VLAN */ + backbone_gw = backbone_hash_find(orig_node->bat_priv, orig_node->orig, vid); if (!backbone_gw) @@ -1314,7 +1339,7 @@ int batadv_bla_is_backbone_gw(struct sk_buff *skb, } /* free all bla structures (for softinterface free or module unload) */ -void batadv_bla_free(struct bat_priv *bat_priv) +void bla_free(struct bat_priv *bat_priv) { struct hard_iface *primary_if; @@ -1323,19 +1348,20 @@ void batadv_bla_free(struct bat_priv *bat_priv) if (bat_priv->claim_hash) { bla_purge_claims(bat_priv, primary_if, 1); - batadv_hash_destroy(bat_priv->claim_hash); + hash_destroy(bat_priv->claim_hash); bat_priv->claim_hash = NULL; } if (bat_priv->backbone_hash) { bla_purge_backbone_gw(bat_priv, 1); - batadv_hash_destroy(bat_priv->backbone_hash); + hash_destroy(bat_priv->backbone_hash); bat_priv->backbone_hash = NULL; } if (primary_if) hardif_free_ref(primary_if); } -/* @bat_priv: the bat priv with all the soft interface information +/** + * @bat_priv: the bat priv with all the soft interface information * @skb: the frame to be checked * @vid: the VLAN ID of the frame * @@ -1346,8 +1372,9 @@ void batadv_bla_free(struct bat_priv *bat_priv) * in these cases, the skb is further handled by this function and * returns 1, otherwise it returns 0 and the caller shall further * process the skb. + * */ -int batadv_bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid) +int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid) { struct ethhdr *ethhdr; struct claim search_claim, *claim = NULL; @@ -1422,7 +1449,8 @@ int batadv_bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid) return ret; } -/* @bat_priv: the bat priv with all the soft interface information +/** + * @bat_priv: the bat priv with all the soft interface information * @skb: the frame to be checked * @vid: the VLAN ID of the frame * @@ -1433,8 +1461,9 @@ int batadv_bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid) * in these cases, the skb is further handled by this function and * returns 1, otherwise it returns 0 and the caller shall further * process the skb. + * */ -int batadv_bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid) +int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid) { struct ethhdr *ethhdr; struct claim search_claim, *claim = NULL; @@ -1508,7 +1537,7 @@ int batadv_bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid) return ret; } -int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) +int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; struct bat_priv *bat_priv = netdev_priv(net_dev); diff --git a/trunk/net/batman-adv/bridge_loop_avoidance.h b/trunk/net/batman-adv/bridge_loop_avoidance.h index 9818b1e4c59e..e39f93acc28f 100644 --- a/trunk/net/batman-adv/bridge_loop_avoidance.h +++ b/trunk/net/batman-adv/bridge_loop_avoidance.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2011-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2011-2012 B.A.T.M.A.N. 
contributors: * * Simon Wunderlich * @@ -15,82 +16,80 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #ifndef _NET_BATMAN_ADV_BLA_H_ #define _NET_BATMAN_ADV_BLA_H_ #ifdef CONFIG_BATMAN_ADV_BLA -int batadv_bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid); -int batadv_bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid); -int batadv_bla_is_backbone_gw(struct sk_buff *skb, - struct orig_node *orig_node, int hdr_size); -int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, void *offset); -int batadv_bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig); -int batadv_bla_check_bcast_duplist(struct bat_priv *bat_priv, - struct bcast_packet *bcast_packet, - int hdr_size); -void batadv_bla_update_orig_address(struct bat_priv *bat_priv, - struct hard_iface *primary_if, - struct hard_iface *oldif); -int batadv_bla_init(struct bat_priv *bat_priv); -void batadv_bla_free(struct bat_priv *bat_priv); +int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid); +int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, short vid); +int bla_is_backbone_gw(struct sk_buff *skb, + struct orig_node *orig_node, int hdr_size); +int bla_claim_table_seq_print_text(struct seq_file *seq, void *offset); +int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, uint8_t *orig); +int bla_check_bcast_duplist(struct bat_priv *bat_priv, + struct bcast_packet *bcast_packet, int hdr_size); +void bla_update_orig_address(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + struct hard_iface *oldif); +int bla_init(struct bat_priv *bat_priv); +void bla_free(struct bat_priv *bat_priv); #define BLA_CRC_INIT 0 #else /* ifdef CONFIG_BATMAN_ADV_BLA */ -static inline int batadv_bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, - short vid) +static inline int bla_rx(struct bat_priv *bat_priv, struct sk_buff *skb, + short vid) { return 0; } -static inline int batadv_bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, - short vid) +static inline int bla_tx(struct bat_priv *bat_priv, struct sk_buff *skb, + short vid) { return 0; } -static inline int batadv_bla_is_backbone_gw(struct sk_buff *skb, - struct orig_node *orig_node, - int hdr_size) +static inline int bla_is_backbone_gw(struct sk_buff *skb, + struct orig_node *orig_node, + int hdr_size) { return 0; } -static inline int batadv_bla_claim_table_seq_print_text(struct seq_file *seq, - void *offset) +static inline int bla_claim_table_seq_print_text(struct seq_file *seq, + void *offset) { return 0; } -static inline int batadv_bla_is_backbone_gw_orig(struct bat_priv *bat_priv, - uint8_t *orig) +static inline int bla_is_backbone_gw_orig(struct bat_priv *bat_priv, + uint8_t *orig) { return 0; } -static inline int -batadv_bla_check_bcast_duplist(struct bat_priv *bat_priv, - struct bcast_packet *bcast_packet, - int hdr_size) +static inline int bla_check_bcast_duplist(struct bat_priv *bat_priv, + struct bcast_packet *bcast_packet, + int hdr_size) { return 0; } -static inline void -batadv_bla_update_orig_address(struct bat_priv *bat_priv, - struct hard_iface *primary_if, - struct hard_iface *oldif) +static inline void bla_update_orig_address(struct bat_priv *bat_priv, + struct hard_iface *primary_if, + struct hard_iface *oldif) { } -static inline int batadv_bla_init(struct bat_priv *bat_priv) +static inline int bla_init(struct bat_priv *bat_priv) { return 1; } -static inline void batadv_bla_free(struct bat_priv 
*bat_priv) +static inline void bla_free(struct bat_priv *bat_priv) { } diff --git a/trunk/net/batman-adv/gateway_client.c b/trunk/net/batman-adv/gateway_client.c index 0d90fffd9efb..47f7186dcefc 100644 --- a/trunk/net/batman-adv/gateway_client.c +++ b/trunk/net/batman-adv/gateway_client.c @@ -1,4 +1,5 @@ -/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors: * * Marek Lindner * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #include "main.h" @@ -31,8 +33,7 @@ #include /* This is the offset of the options field in a dhcp packet starting at - * the beginning of the dhcp header - */ + * the beginning of the dhcp header */ #define DHCP_OPTIONS_OFFSET 240 #define DHCP_REQUEST 3 @@ -59,7 +60,7 @@ static struct gw_node *gw_get_selected_gw_node(struct bat_priv *bat_priv) return gw_node; } -struct orig_node *batadv_gw_get_selected_orig(struct bat_priv *bat_priv) +struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv) { struct gw_node *gw_node; struct orig_node *orig_node = NULL; @@ -102,7 +103,7 @@ static void gw_select(struct bat_priv *bat_priv, struct gw_node *new_gw_node) spin_unlock_bh(&bat_priv->gw_list_lock); } -void batadv_gw_deselect(struct bat_priv *bat_priv) +void gw_deselect(struct bat_priv *bat_priv) { atomic_set(&bat_priv->gw_reselect, 1); } @@ -115,15 +116,13 @@ static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv) uint32_t max_gw_factor = 0, tmp_gw_factor = 0; uint8_t max_tq = 0; int down, up; - struct orig_node *orig_node; rcu_read_lock(); hlist_for_each_entry_rcu(gw_node, node, &bat_priv->gw_list, list) { if (gw_node->deleted) continue; - orig_node = gw_node->orig_node; - router = batadv_orig_node_get_router(orig_node); + router = orig_node_get_router(gw_node->orig_node); if (!router) continue; @@ -132,8 +131,8 @@ static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv) switch (atomic_read(&bat_priv->gw_sel_class)) { case 1: /* fast connection */ - batadv_gw_bandwidth_to_kbit(orig_node->gw_flags, - &down, &up); + gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, + &down, &up); tmp_gw_factor = (router->tq_avg * router->tq_avg * down * 100 * 100) / @@ -150,13 +149,14 @@ static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv) } break; - default: /* 2: stable connection (use best statistic) + default: /** + * 2: stable connection (use best statistic) * 3: fast-switch (use best statistic but change as * soon as a better gateway appears) * XX: late-switch (use best statistic but change as * soon as a better gateway appears which has * $routing_class more tq points) - */ + **/ if (router->tq_avg > max_tq) { if (curr_gw) gw_node_free_ref(curr_gw); @@ -175,24 +175,25 @@ static struct gw_node *gw_get_best_gw_node(struct bat_priv *bat_priv) gw_node_free_ref(gw_node); next: - batadv_neigh_node_free_ref(router); + neigh_node_free_ref(router); } rcu_read_unlock(); return curr_gw; } -void batadv_gw_election(struct bat_priv *bat_priv) +void gw_election(struct bat_priv *bat_priv) { struct gw_node *curr_gw = NULL, *next_gw = NULL; struct neigh_node *router = NULL; char gw_addr[18] = { '\0' }; - /* The batman daemon checks here if we already passed a full originator + /** + * The batman daemon checks here if we already passed a full originator * cycle in order to make sure we don't choose the first gateway we * hear about. 
This check is based on the daemon's uptime which we * don't have. - */ + **/ if (atomic_read(&bat_priv->gw_mode) != GW_MODE_CLIENT) goto out; @@ -209,9 +210,9 @@ void batadv_gw_election(struct bat_priv *bat_priv) if (next_gw) { sprintf(gw_addr, "%pM", next_gw->orig_node->orig); - router = batadv_orig_node_get_router(next_gw->orig_node); + router = orig_node_get_router(next_gw->orig_node); if (!router) { - batadv_gw_deselect(bat_priv); + gw_deselect(bat_priv); goto out; } } @@ -219,19 +220,19 @@ void batadv_gw_election(struct bat_priv *bat_priv) if ((curr_gw) && (!next_gw)) { bat_dbg(DBG_BATMAN, bat_priv, "Removing selected gateway - no gateway in range\n"); - batadv_throw_uevent(bat_priv, UEV_GW, UEV_DEL, NULL); + throw_uevent(bat_priv, UEV_GW, UEV_DEL, NULL); } else if ((!curr_gw) && (next_gw)) { bat_dbg(DBG_BATMAN, bat_priv, "Adding route to gateway %pM (gw_flags: %i, tq: %i)\n", next_gw->orig_node->orig, next_gw->orig_node->gw_flags, router->tq_avg); - batadv_throw_uevent(bat_priv, UEV_GW, UEV_ADD, gw_addr); + throw_uevent(bat_priv, UEV_GW, UEV_ADD, gw_addr); } else { bat_dbg(DBG_BATMAN, bat_priv, "Changing route to gateway %pM (gw_flags: %i, tq: %i)\n", next_gw->orig_node->orig, next_gw->orig_node->gw_flags, router->tq_avg); - batadv_throw_uevent(bat_priv, UEV_GW, UEV_CHANGE, gw_addr); + throw_uevent(bat_priv, UEV_GW, UEV_CHANGE, gw_addr); } gw_select(bat_priv, next_gw); @@ -242,21 +243,20 @@ void batadv_gw_election(struct bat_priv *bat_priv) if (next_gw) gw_node_free_ref(next_gw); if (router) - batadv_neigh_node_free_ref(router); + neigh_node_free_ref(router); } -void batadv_gw_check_election(struct bat_priv *bat_priv, - struct orig_node *orig_node) +void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node) { struct orig_node *curr_gw_orig; struct neigh_node *router_gw = NULL, *router_orig = NULL; uint8_t gw_tq_avg, orig_tq_avg; - curr_gw_orig = batadv_gw_get_selected_orig(bat_priv); + curr_gw_orig = gw_get_selected_orig(bat_priv); if (!curr_gw_orig) goto deselect; - router_gw = batadv_orig_node_get_router(curr_gw_orig); + router_gw = orig_node_get_router(curr_gw_orig); if (!router_gw) goto deselect; @@ -264,7 +264,7 @@ void batadv_gw_check_election(struct bat_priv *bat_priv, if (curr_gw_orig == orig_node) goto out; - router_orig = batadv_orig_node_get_router(orig_node); + router_orig = orig_node_get_router(orig_node); if (!router_orig) goto out; @@ -275,9 +275,10 @@ void batadv_gw_check_election(struct bat_priv *bat_priv, if (orig_tq_avg < gw_tq_avg) goto out; - /* if the routing class is greater than 3 the value tells us how much + /** + * if the routing class is greater than 3 the value tells us how much * greater the TQ value of the new gateway must be - */ + **/ if ((atomic_read(&bat_priv->gw_sel_class) > 3) && (orig_tq_avg - gw_tq_avg < atomic_read(&bat_priv->gw_sel_class))) goto out; @@ -287,14 +288,14 @@ void batadv_gw_check_election(struct bat_priv *bat_priv, gw_tq_avg, orig_tq_avg); deselect: - batadv_gw_deselect(bat_priv); + gw_deselect(bat_priv); out: if (curr_gw_orig) - batadv_orig_node_free_ref(curr_gw_orig); + orig_node_free_ref(curr_gw_orig); if (router_gw) - batadv_neigh_node_free_ref(router_gw); + neigh_node_free_ref(router_gw); if (router_orig) - batadv_neigh_node_free_ref(router_orig); + neigh_node_free_ref(router_orig); return; } @@ -317,7 +318,7 @@ static void gw_node_add(struct bat_priv *bat_priv, hlist_add_head_rcu(&gw_node->list, &bat_priv->gw_list); spin_unlock_bh(&bat_priv->gw_list_lock); - batadv_gw_bandwidth_to_kbit(new_gwflags, &down, 
&up); + gw_bandwidth_to_kbit(new_gwflags, &down, &up); bat_dbg(DBG_BATMAN, bat_priv, "Found new gateway %pM -> gw_class: %i - %i%s/%i%s\n", orig_node->orig, new_gwflags, @@ -327,13 +328,14 @@ static void gw_node_add(struct bat_priv *bat_priv, (up > 2048 ? "MBit" : "KBit")); } -void batadv_gw_node_update(struct bat_priv *bat_priv, - struct orig_node *orig_node, uint8_t new_gwflags) +void gw_node_update(struct bat_priv *bat_priv, + struct orig_node *orig_node, uint8_t new_gwflags) { struct hlist_node *node; struct gw_node *gw_node, *curr_gw; - /* Note: We don't need a NULL check here, since curr_gw never gets + /** + * Note: We don't need a NULL check here, since curr_gw never gets * dereferenced. If curr_gw is NULL we also should not exit as we may * have this gateway in our list (duplication check!) even though we * have no currently selected gateway. @@ -372,7 +374,7 @@ void batadv_gw_node_update(struct bat_priv *bat_priv, goto unlock; deselect: - batadv_gw_deselect(bat_priv); + gw_deselect(bat_priv); unlock: rcu_read_unlock(); @@ -380,13 +382,12 @@ void batadv_gw_node_update(struct bat_priv *bat_priv, gw_node_free_ref(curr_gw); } -void batadv_gw_node_delete(struct bat_priv *bat_priv, - struct orig_node *orig_node) +void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node) { - batadv_gw_node_update(bat_priv, orig_node, 0); + gw_node_update(bat_priv, orig_node, 0); } -void batadv_gw_node_purge(struct bat_priv *bat_priv) +void gw_node_purge(struct bat_priv *bat_priv) { struct gw_node *gw_node, *curr_gw; struct hlist_node *node, *node_tmp; @@ -415,13 +416,15 @@ void batadv_gw_node_purge(struct bat_priv *bat_priv) /* gw_deselect() needs to acquire the gw_list_lock */ if (do_deselect) - batadv_gw_deselect(bat_priv); + gw_deselect(bat_priv); if (curr_gw) gw_node_free_ref(curr_gw); } -/* fails if orig_node has no router */ +/** + * fails if orig_node has no router + */ static int _write_buffer_text(struct bat_priv *bat_priv, struct seq_file *seq, const struct gw_node *gw_node) { @@ -429,9 +432,9 @@ static int _write_buffer_text(struct bat_priv *bat_priv, struct seq_file *seq, struct neigh_node *router; int down, up, ret = -1; - batadv_gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up); + gw_bandwidth_to_kbit(gw_node->orig_node->gw_flags, &down, &up); - router = batadv_orig_node_get_router(gw_node->orig_node); + router = orig_node_get_router(gw_node->orig_node); if (!router) goto out; @@ -448,14 +451,14 @@ static int _write_buffer_text(struct bat_priv *bat_priv, struct seq_file *seq, (up > 2048 ? up / 1024 : up), (up > 2048 ? "MBit" : "KBit")); - batadv_neigh_node_free_ref(router); + neigh_node_free_ref(router); if (curr_gw) gw_node_free_ref(curr_gw); out: return ret; } -int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset) +int gw_client_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; struct bat_priv *bat_priv = netdev_priv(net_dev); @@ -527,14 +530,12 @@ static bool is_type_dhcprequest(struct sk_buff *skb, int header_len) /* Access the dhcp option lists. Each entry is made up by: * - octet 1: option type * - octet 2: option data len (only if type != 255 and 0) - * - octet 3: option data - */ + * - octet 3: option data */ while (*p != 255 && !ret) { /* p now points to the first octet: option type */ if (*p == 53) { /* type 53 is the message type option. 
- * Jump the len octet and go to the data octet - */ + * Jump the len octet and go to the data octet */ if (pkt_len < 2) goto out; p += 2; @@ -567,7 +568,7 @@ static bool is_type_dhcprequest(struct sk_buff *skb, int header_len) return ret; } -bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) +bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) { struct ethhdr *ethhdr; struct iphdr *iphdr; @@ -633,8 +634,8 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) return true; } -bool batadv_gw_out_of_range(struct bat_priv *bat_priv, - struct sk_buff *skb, struct ethhdr *ethhdr) +bool gw_out_of_range(struct bat_priv *bat_priv, + struct sk_buff *skb, struct ethhdr *ethhdr) { struct neigh_node *neigh_curr = NULL, *neigh_old = NULL; struct orig_node *orig_dst_node = NULL; @@ -643,12 +644,12 @@ bool batadv_gw_out_of_range(struct bat_priv *bat_priv, unsigned int header_len = 0; uint8_t curr_tq_avg; - ret = batadv_gw_is_dhcp_target(skb, &header_len); + ret = gw_is_dhcp_target(skb, &header_len); if (!ret) goto out; - orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source, - ethhdr->h_dest); + orig_dst_node = transtable_search(bat_priv, ethhdr->h_source, + ethhdr->h_dest); if (!orig_dst_node) goto out; @@ -662,8 +663,7 @@ bool batadv_gw_out_of_range(struct bat_priv *bat_priv, switch (atomic_read(&bat_priv->gw_mode)) { case GW_MODE_SERVER: /* If we are a GW then we are our best GW. We can artificially - * set the tq towards ourself as the maximum value - */ + * set the tq towards ourself as the maximum value */ curr_tq_avg = TQ_MAX_VALUE; break; case GW_MODE_CLIENT: @@ -677,10 +677,8 @@ bool batadv_gw_out_of_range(struct bat_priv *bat_priv, /* If the dhcp packet has been sent to a different gw, * we have to evaluate whether the old gw is still - * reliable enough - */ - neigh_curr = batadv_find_router(bat_priv, curr_gw->orig_node, - NULL); + * reliable enough */ + neigh_curr = find_router(bat_priv, curr_gw->orig_node, NULL); if (!neigh_curr) goto out; @@ -691,7 +689,7 @@ bool batadv_gw_out_of_range(struct bat_priv *bat_priv, goto out; } - neigh_old = batadv_find_router(bat_priv, orig_dst_node, NULL); + neigh_old = find_router(bat_priv, orig_dst_node, NULL); if (!neigh_old) goto out; @@ -700,12 +698,12 @@ bool batadv_gw_out_of_range(struct bat_priv *bat_priv, out: if (orig_dst_node) - batadv_orig_node_free_ref(orig_dst_node); + orig_node_free_ref(orig_dst_node); if (curr_gw) gw_node_free_ref(curr_gw); if (neigh_old) - batadv_neigh_node_free_ref(neigh_old); + neigh_node_free_ref(neigh_old); if (neigh_curr) - batadv_neigh_node_free_ref(neigh_curr); + neigh_node_free_ref(neigh_curr); return out_of_range; } diff --git a/trunk/net/batman-adv/gateway_client.h b/trunk/net/batman-adv/gateway_client.h index 4529d42894ef..bf56a5aea10b 100644 --- a/trunk/net/batman-adv/gateway_client.h +++ b/trunk/net/batman-adv/gateway_client.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2009-2012 B.A.T.M.A.N. 
contributors: * * Marek Lindner * @@ -15,24 +16,23 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #ifndef _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ #define _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ -void batadv_gw_deselect(struct bat_priv *bat_priv); -void batadv_gw_election(struct bat_priv *bat_priv); -struct orig_node *batadv_gw_get_selected_orig(struct bat_priv *bat_priv); -void batadv_gw_check_election(struct bat_priv *bat_priv, - struct orig_node *orig_node); -void batadv_gw_node_update(struct bat_priv *bat_priv, - struct orig_node *orig_node, uint8_t new_gwflags); -void batadv_gw_node_delete(struct bat_priv *bat_priv, - struct orig_node *orig_node); -void batadv_gw_node_purge(struct bat_priv *bat_priv); -int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset); -bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len); -bool batadv_gw_out_of_range(struct bat_priv *bat_priv, - struct sk_buff *skb, struct ethhdr *ethhdr); +void gw_deselect(struct bat_priv *bat_priv); +void gw_election(struct bat_priv *bat_priv); +struct orig_node *gw_get_selected_orig(struct bat_priv *bat_priv); +void gw_check_election(struct bat_priv *bat_priv, struct orig_node *orig_node); +void gw_node_update(struct bat_priv *bat_priv, + struct orig_node *orig_node, uint8_t new_gwflags); +void gw_node_delete(struct bat_priv *bat_priv, struct orig_node *orig_node); +void gw_node_purge(struct bat_priv *bat_priv); +int gw_client_seq_print_text(struct seq_file *seq, void *offset); +bool gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len); +bool gw_out_of_range(struct bat_priv *bat_priv, + struct sk_buff *skb, struct ethhdr *ethhdr); #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ diff --git a/trunk/net/batman-adv/gateway_common.c b/trunk/net/batman-adv/gateway_common.c index 3700562cf276..6e3b052b935d 100644 --- a/trunk/net/batman-adv/gateway_common.c +++ b/trunk/net/batman-adv/gateway_common.c @@ -1,4 +1,5 @@ -/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2009-2012 B.A.T.M.A.N. 
contributors: * * Marek Lindner * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #include "main.h" @@ -57,7 +59,7 @@ static void kbit_to_gw_bandwidth(int down, int up, long *gw_srv_class) } /* returns the up and downspeeds in kbit, calculated from the class */ -void batadv_gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up) +void gw_bandwidth_to_kbit(uint8_t gw_srv_class, int *down, int *up) { int sbit = (gw_srv_class & 0x80) >> 7; int dpart = (gw_srv_class & 0x78) >> 3; @@ -134,8 +136,7 @@ static bool parse_gw_bandwidth(struct net_device *net_dev, char *buff, return true; } -ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff, - size_t count) +ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count) { struct bat_priv *bat_priv = netdev_priv(net_dev); long gw_bandwidth_tmp = 0; @@ -154,16 +155,17 @@ ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff, kbit_to_gw_bandwidth(down, up, &gw_bandwidth_tmp); - /* the gw bandwidth we guessed above might not match the given + /** + * the gw bandwidth we guessed above might not match the given * speeds, hence we need to calculate it back to show the number * that is going to be propagated - */ - batadv_gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up); + **/ + gw_bandwidth_to_kbit((uint8_t)gw_bandwidth_tmp, &down, &up); if (atomic_read(&bat_priv->gw_bandwidth) == gw_bandwidth_tmp) return count; - batadv_gw_deselect(bat_priv); + gw_deselect(bat_priv); bat_info(net_dev, "Changing gateway bandwidth from: '%i' to: '%ld' (propagating: %d%s/%d%s)\n", atomic_read(&bat_priv->gw_bandwidth), gw_bandwidth_tmp, diff --git a/trunk/net/batman-adv/gateway_common.h b/trunk/net/batman-adv/gateway_common.h index 6f8a4d0cbbb6..b8fb11c4f927 100644 --- a/trunk/net/batman-adv/gateway_common.h +++ b/trunk/net/batman-adv/gateway_common.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors: * * Marek Lindner * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #ifndef _NET_BATMAN_ADV_GATEWAY_COMMON_H_ @@ -30,8 +32,7 @@ enum gw_modes { #define GW_MODE_CLIENT_NAME "client" #define GW_MODE_SERVER_NAME "server" -void batadv_gw_bandwidth_to_kbit(uint8_t gw_class, int *down, int *up); -ssize_t batadv_gw_bandwidth_set(struct net_device *net_dev, char *buff, - size_t count); +void gw_bandwidth_to_kbit(uint8_t gw_class, int *down, int *up); +ssize_t gw_bandwidth_set(struct net_device *net_dev, char *buff, size_t count); #endif /* _NET_BATMAN_ADV_GATEWAY_COMMON_H_ */ diff --git a/trunk/net/batman-adv/hard-interface.c b/trunk/net/batman-adv/hard-interface.c index 2a4d394771b8..ce78c6d645c6 100644 --- a/trunk/net/batman-adv/hard-interface.c +++ b/trunk/net/batman-adv/hard-interface.c @@ -1,4 +1,5 @@ -/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. 
contributors: * * Marek Lindner, Simon Wunderlich * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #include "main.h" @@ -30,7 +32,7 @@ #include -void batadv_hardif_free_rcu(struct rcu_head *rcu) +void hardif_free_rcu(struct rcu_head *rcu) { struct hard_iface *hard_iface; @@ -39,12 +41,12 @@ void batadv_hardif_free_rcu(struct rcu_head *rcu) kfree(hard_iface); } -struct hard_iface *batadv_hardif_get_by_netdev(const struct net_device *net_dev) +struct hard_iface *hardif_get_by_netdev(const struct net_device *net_dev) { struct hard_iface *hard_iface; rcu_read_lock(); - list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { if (hard_iface->net_dev == net_dev && atomic_inc_not_zero(&hard_iface->refcount)) goto out; @@ -69,9 +71,13 @@ static int is_valid_iface(const struct net_device *net_dev) return 0; /* no batman over batman */ - if (batadv_softif_is_valid(net_dev)) + if (softif_is_valid(net_dev)) return 0; + /* Device is being bridged */ + /* if (net_dev->priv_flags & IFF_BRIDGE_PORT) + return 0; */ + return 1; } @@ -80,7 +86,7 @@ static struct hard_iface *hardif_get_active(const struct net_device *soft_iface) struct hard_iface *hard_iface; rcu_read_lock(); - list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { if (hard_iface->soft_iface != soft_iface) continue; @@ -112,7 +118,7 @@ static void primary_if_update_addr(struct bat_priv *bat_priv, memcpy(vis_packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN); - batadv_bla_update_orig_address(bat_priv, primary_if, oldif); + bla_update_orig_address(bat_priv, primary_if, oldif); out: if (primary_if) hardif_free_ref(primary_if); @@ -155,7 +161,7 @@ static void check_known_mac_addr(const struct net_device *net_dev) const struct hard_iface *hard_iface; rcu_read_lock(); - list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { if ((hard_iface->if_status != IF_ACTIVE) && (hard_iface->if_status != IF_TO_BE_ACTIVATED)) continue; @@ -174,20 +180,19 @@ static void check_known_mac_addr(const struct net_device *net_dev) rcu_read_unlock(); } -int batadv_hardif_min_mtu(struct net_device *soft_iface) +int hardif_min_mtu(struct net_device *soft_iface) { const struct bat_priv *bat_priv = netdev_priv(soft_iface); const struct hard_iface *hard_iface; /* allow big frames if all devices are capable to do so - * (have MTU > 1500 + BAT_HEADER_LEN) - */ + * (have MTU > 1500 + BAT_HEADER_LEN) */ int min_mtu = ETH_DATA_LEN; if (atomic_read(&bat_priv->fragmentation)) goto out; rcu_read_lock(); - list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { if ((hard_iface->if_status != IF_ACTIVE) && (hard_iface->if_status != IF_TO_BE_ACTIVATED)) continue; @@ -204,11 +209,11 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface) } /* adjusts the MTU if a new interface with a smaller MTU appeared. 
*/ -void batadv_update_min_mtu(struct net_device *soft_iface) +void update_min_mtu(struct net_device *soft_iface) { int min_mtu; - min_mtu = batadv_hardif_min_mtu(soft_iface); + min_mtu = hardif_min_mtu(soft_iface); if (soft_iface->mtu != min_mtu) soft_iface->mtu = min_mtu; } @@ -226,7 +231,8 @@ static void hardif_activate_interface(struct hard_iface *hard_iface) bat_priv->bat_algo_ops->bat_iface_update_mac(hard_iface); hard_iface->if_status = IF_TO_BE_ACTIVATED; - /* the first active interface becomes our primary interface or + /** + * the first active interface becomes our primary interface or * the next active interface after the old primary interface was removed */ primary_if = primary_if_get_selected(bat_priv); @@ -236,7 +242,7 @@ static void hardif_activate_interface(struct hard_iface *hard_iface) bat_info(hard_iface->soft_iface, "Interface activated: %s\n", hard_iface->net_dev->name); - batadv_update_min_mtu(hard_iface->soft_iface); + update_min_mtu(hard_iface->soft_iface); out: if (primary_if) @@ -254,11 +260,11 @@ static void hardif_deactivate_interface(struct hard_iface *hard_iface) bat_info(hard_iface->soft_iface, "Interface deactivated: %s\n", hard_iface->net_dev->name); - batadv_update_min_mtu(hard_iface->soft_iface); + update_min_mtu(hard_iface->soft_iface); } -int batadv_hardif_enable_interface(struct hard_iface *hard_iface, - const char *iface_name) +int hardif_enable_interface(struct hard_iface *hard_iface, + const char *iface_name) { struct bat_priv *bat_priv; struct net_device *soft_iface; @@ -278,7 +284,7 @@ int batadv_hardif_enable_interface(struct hard_iface *hard_iface, soft_iface = dev_get_by_name(&init_net, iface_name); if (!soft_iface) { - soft_iface = batadv_softif_create(iface_name); + soft_iface = softif_create(iface_name); if (!soft_iface) { ret = -ENOMEM; @@ -289,7 +295,7 @@ int batadv_hardif_enable_interface(struct hard_iface *hard_iface, dev_hold(soft_iface); } - if (!batadv_softif_is_valid(soft_iface)) { + if (!softif_is_valid(soft_iface)) { pr_err("Can't create batman mesh interface %s: already exists as regular interface\n", soft_iface->name); ret = -EINVAL; @@ -306,10 +312,10 @@ int batadv_hardif_enable_interface(struct hard_iface *hard_iface, hard_iface->if_num = bat_priv->num_ifaces; bat_priv->num_ifaces++; hard_iface->if_status = IF_INACTIVE; - batadv_orig_hash_add_if(hard_iface, bat_priv->num_ifaces); + orig_hash_add_if(hard_iface, bat_priv->num_ifaces); hard_iface->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN); - hard_iface->batman_adv_ptype.func = batadv_batman_skb_recv; + hard_iface->batman_adv_ptype.func = batman_skb_recv; hard_iface->batman_adv_ptype.dev = hard_iface->net_dev; dev_add_pack(&hard_iface->batman_adv_ptype); @@ -339,7 +345,7 @@ int batadv_hardif_enable_interface(struct hard_iface *hard_iface, hard_iface->net_dev->name); /* begin scheduling originator messages on that interface */ - batadv_schedule_bat_ogm(hard_iface); + schedule_bat_ogm(hard_iface); out: return 0; @@ -351,7 +357,7 @@ int batadv_hardif_enable_interface(struct hard_iface *hard_iface, return ret; } -void batadv_hardif_disable_interface(struct hard_iface *hard_iface) +void hardif_disable_interface(struct hard_iface *hard_iface) { struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); struct hard_iface *primary_if = NULL; @@ -367,7 +373,7 @@ void batadv_hardif_disable_interface(struct hard_iface *hard_iface) dev_remove_pack(&hard_iface->batman_adv_ptype); bat_priv->num_ifaces--; - batadv_orig_hash_del_if(hard_iface, bat_priv->num_ifaces); + 
orig_hash_del_if(hard_iface, bat_priv->num_ifaces); primary_if = primary_if_get_selected(bat_priv); if (hard_iface == primary_if) { @@ -384,13 +390,13 @@ void batadv_hardif_disable_interface(struct hard_iface *hard_iface) hard_iface->if_status = IF_NOT_IN_USE; /* delete all references to this hard_iface */ - batadv_purge_orig_ref(bat_priv); - batadv_purge_outstanding_packets(bat_priv, hard_iface); + purge_orig_ref(bat_priv); + purge_outstanding_packets(bat_priv, hard_iface); dev_put(hard_iface->soft_iface); /* nobody uses this interface anymore */ if (!bat_priv->num_ifaces) - batadv_softif_destroy(hard_iface->soft_iface); + softif_destroy(hard_iface->soft_iface); hard_iface->soft_iface = NULL; hardif_free_ref(hard_iface); @@ -417,7 +423,7 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev) if (!hard_iface) goto release_dev; - ret = batadv_sysfs_add_hardif(&hard_iface->hardif_obj, net_dev); + ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev); if (ret) goto free_if; @@ -430,9 +436,10 @@ static struct hard_iface *hardif_add_interface(struct net_device *net_dev) atomic_set(&hard_iface->refcount, 2); check_known_mac_addr(hard_iface->net_dev); - list_add_tail_rcu(&hard_iface->list, &batadv_hardif_list); + list_add_tail_rcu(&hard_iface->list, &hardif_list); - /* This can't be called via a bat_priv callback because + /** + * This can't be called via a bat_priv callback because * we have no bat_priv yet. */ atomic_set(&hard_iface->seqno, 1); @@ -454,23 +461,23 @@ static void hardif_remove_interface(struct hard_iface *hard_iface) /* first deactivate interface */ if (hard_iface->if_status != IF_NOT_IN_USE) - batadv_hardif_disable_interface(hard_iface); + hardif_disable_interface(hard_iface); if (hard_iface->if_status != IF_NOT_IN_USE) return; hard_iface->if_status = IF_TO_BE_REMOVED; - batadv_sysfs_del_hardif(&hard_iface->hardif_obj); + sysfs_del_hardif(&hard_iface->hardif_obj); hardif_free_ref(hard_iface); } -void batadv_hardif_remove_interfaces(void) +void hardif_remove_interfaces(void) { struct hard_iface *hard_iface, *hard_iface_tmp; rtnl_lock(); list_for_each_entry_safe(hard_iface, hard_iface_tmp, - &batadv_hardif_list, list) { + &hardif_list, list) { list_del_rcu(&hard_iface->list); hardif_remove_interface(hard_iface); } @@ -481,7 +488,7 @@ static int hard_if_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *net_dev = ptr; - struct hard_iface *hard_iface = batadv_hardif_get_by_netdev(net_dev); + struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); struct hard_iface *primary_if = NULL; struct bat_priv *bat_priv; @@ -506,7 +513,7 @@ static int hard_if_event(struct notifier_block *this, break; case NETDEV_CHANGEMTU: if (hard_iface->soft_iface) - batadv_update_min_mtu(hard_iface->soft_iface); + update_min_mtu(hard_iface->soft_iface); break; case NETDEV_CHANGEADDR: if (hard_iface->if_status == IF_NOT_IN_USE) @@ -537,9 +544,8 @@ static int hard_if_event(struct notifier_block *this, } /* This function returns true if the interface represented by ifindex is a - * 802.11 wireless device - */ -bool batadv_is_wifi_iface(int ifindex) + * 802.11 wireless device */ +bool is_wifi_iface(int ifindex) { struct net_device *net_device = NULL; bool ret = false; @@ -553,8 +559,7 @@ bool batadv_is_wifi_iface(int ifindex) #ifdef CONFIG_WIRELESS_EXT /* pre-cfg80211 drivers have to implement WEXT, so it is possible to - * check for wireless_handlers != NULL - */ + * check for wireless_handlers != NULL */ if (net_device->wireless_handlers) 
ret = true; else @@ -568,6 +573,6 @@ bool batadv_is_wifi_iface(int ifindex) return ret; } -struct notifier_block batadv_hard_if_notifier = { +struct notifier_block hard_if_notifier = { .notifier_call = hard_if_event, }; diff --git a/trunk/net/batman-adv/hard-interface.h b/trunk/net/batman-adv/hard-interface.h index 6bc12c0eb2f0..e68c5655e616 100644 --- a/trunk/net/batman-adv/hard-interface.h +++ b/trunk/net/batman-adv/hard-interface.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #ifndef _NET_BATMAN_ADV_HARD_INTERFACE_H_ @@ -29,23 +31,23 @@ enum hard_if_state { IF_I_WANT_YOU }; -extern struct notifier_block batadv_hard_if_notifier; +extern struct notifier_block hard_if_notifier; struct hard_iface* -batadv_hardif_get_by_netdev(const struct net_device *net_dev); -int batadv_hardif_enable_interface(struct hard_iface *hard_iface, - const char *iface_name); -void batadv_hardif_disable_interface(struct hard_iface *hard_iface); -void batadv_hardif_remove_interfaces(void); -int batadv_hardif_min_mtu(struct net_device *soft_iface); -void batadv_update_min_mtu(struct net_device *soft_iface); -void batadv_hardif_free_rcu(struct rcu_head *rcu); -bool batadv_is_wifi_iface(int ifindex); +hardif_get_by_netdev(const struct net_device *net_dev); +int hardif_enable_interface(struct hard_iface *hard_iface, + const char *iface_name); +void hardif_disable_interface(struct hard_iface *hard_iface); +void hardif_remove_interfaces(void); +int hardif_min_mtu(struct net_device *soft_iface); +void update_min_mtu(struct net_device *soft_iface); +void hardif_free_rcu(struct rcu_head *rcu); +bool is_wifi_iface(int ifindex); static inline void hardif_free_ref(struct hard_iface *hard_iface) { if (atomic_dec_and_test(&hard_iface->refcount)) - call_rcu(&hard_iface->rcu, batadv_hardif_free_rcu); + call_rcu(&hard_iface->rcu, hardif_free_rcu); } static inline struct hard_iface *primary_if_get_selected( diff --git a/trunk/net/batman-adv/hash.c b/trunk/net/batman-adv/hash.c index e39f8f4bb165..5b2eabe7c4e0 100644 --- a/trunk/net/batman-adv/hash.c +++ b/trunk/net/batman-adv/hash.c @@ -1,4 +1,5 @@ -/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2006-2012 B.A.T.M.A.N. contributors: * * Simon Wunderlich, Marek Lindner * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #include "main.h" @@ -32,7 +34,7 @@ static void hash_init(struct hashtable_t *hash) } /* free only the hashtable and the hash itself. */ -void batadv_hash_destroy(struct hashtable_t *hash) +void hash_destroy(struct hashtable_t *hash) { kfree(hash->list_locks); kfree(hash->table); @@ -40,7 +42,7 @@ void batadv_hash_destroy(struct hashtable_t *hash) } /* allocates and clears the hash */ -struct hashtable_t *batadv_hash_new(uint32_t size) +struct hashtable_t *hash_new(uint32_t size) { struct hashtable_t *hash; diff --git a/trunk/net/batman-adv/hash.h b/trunk/net/batman-adv/hash.h index eba8f2a55ccc..3d67ce49fc31 100644 --- a/trunk/net/batman-adv/hash.h +++ b/trunk/net/batman-adv/hash.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2006-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2006-2012 B.A.T.M.A.N. 
contributors: * * Simon Wunderlich, Marek Lindner * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #ifndef _NET_BATMAN_ADV_HASH_H_ @@ -22,15 +24,15 @@ #include -/* callback to a compare function. should compare 2 element datas for their - * keys, return 0 if same and not 0 if not same - */ +/* callback to a compare function. should + * compare 2 element datas for their keys, + * return 0 if same and not 0 if not + * same */ typedef int (*hashdata_compare_cb)(const struct hlist_node *, const void *); /* the hashfunction, should return an index * based on the key in the data of the first - * argument and the size the second - */ + * argument and the size the second */ typedef uint32_t (*hashdata_choose_cb)(const void *, uint32_t); typedef void (*hashdata_free_cb)(struct hlist_node *, void *); @@ -41,19 +43,18 @@ struct hashtable_t { }; /* allocates and clears the hash */ -struct hashtable_t *batadv_hash_new(uint32_t size); +struct hashtable_t *hash_new(uint32_t size); /* set class key for all locks */ void batadv_hash_set_lock_class(struct hashtable_t *hash, struct lock_class_key *key); /* free only the hashtable and the hash itself. */ -void batadv_hash_destroy(struct hashtable_t *hash); +void hash_destroy(struct hashtable_t *hash); /* remove the hash structure. if hashdata_free_cb != NULL, this function will be * called to remove the elements inside of the hash. if you don't remove the - * elements, memory might be leaked. - */ + * elements, memory might be leaked. */ static inline void hash_delete(struct hashtable_t *hash, hashdata_free_cb free_cb, void *arg) { @@ -76,10 +77,11 @@ static inline void hash_delete(struct hashtable_t *hash, spin_unlock_bh(list_lock); } - batadv_hash_destroy(hash); + hash_destroy(hash); } -/* hash_add - adds data to the hashtable +/** + * hash_add - adds data to the hashtable * @hash: storage hash table * @compare: callback to determine if 2 hash elements are identical * @choose: callback calculating the hash index @@ -89,6 +91,7 @@ static inline void hash_delete(struct hashtable_t *hash, * Returns 0 on success, 1 if the element already is in the hash * and -1 on error. */ + static inline int hash_add(struct hashtable_t *hash, hashdata_compare_cb compare, hashdata_choose_cb choose, @@ -131,8 +134,7 @@ static inline int hash_add(struct hashtable_t *hash, /* removes data from hash, if found. returns pointer do data on success, so you * can remove the used structure yourself, or NULL on error . data could be the * structure you use with just the key filled, we just need the key for - * comparing. - */ + * comparing. */ static inline void *hash_remove(struct hashtable_t *hash, hashdata_compare_cb compare, hashdata_choose_cb choose, void *data) diff --git a/trunk/net/batman-adv/icmp_socket.c b/trunk/net/batman-adv/icmp_socket.c index 40c5e189e6fd..d27db8192e93 100644 --- a/trunk/net/batman-adv/icmp_socket.c +++ b/trunk/net/batman-adv/icmp_socket.c @@ -1,4 +1,5 @@ -/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. 
contributors: * * Marek Lindner * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #include "main.h" @@ -32,7 +34,7 @@ static void bat_socket_add_packet(struct socket_client *socket_client, struct icmp_packet_rr *icmp_packet, size_t icmp_len); -void batadv_socket_init(void) +void bat_socket_init(void) { memset(socket_client_hash, 0, sizeof(socket_client_hash)); } @@ -71,7 +73,7 @@ static int bat_socket_open(struct inode *inode, struct file *file) file->private_data = socket_client; - batadv_inc_module_count(); + inc_module_count(); return 0; } @@ -96,7 +98,7 @@ static int bat_socket_release(struct inode *inode, struct file *file) spin_unlock_bh(&socket_client->lock); kfree(socket_client); - batadv_dec_module_count(); + dec_module_count(); return 0; } @@ -217,7 +219,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff, if (!orig_node) goto dst_unreach; - neigh_node = batadv_orig_node_get_router(orig_node); + neigh_node = orig_node_get_router(orig_node); if (!neigh_node) goto dst_unreach; @@ -234,7 +236,7 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff, memcpy(icmp_packet->rr, neigh_node->if_incoming->net_dev->dev_addr, ETH_ALEN); - batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); + send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); goto out; dst_unreach: @@ -246,9 +248,9 @@ static ssize_t bat_socket_write(struct file *file, const char __user *buff, if (primary_if) hardif_free_ref(primary_if); if (neigh_node) - batadv_neigh_node_free_ref(neigh_node); + neigh_node_free_ref(neigh_node); if (orig_node) - batadv_orig_node_free_ref(orig_node); + orig_node_free_ref(orig_node); return len; } @@ -274,7 +276,7 @@ static const struct file_operations fops = { .llseek = no_llseek, }; -int batadv_socket_setup(struct bat_priv *bat_priv) +int bat_socket_setup(struct bat_priv *bat_priv) { struct dentry *d; @@ -310,8 +312,7 @@ static void bat_socket_add_packet(struct socket_client *socket_client, spin_lock_bh(&socket_client->lock); /* while waiting for the lock the socket_client could have been - * deleted - */ + * deleted */ if (!socket_client_hash[icmp_packet->uid]) { spin_unlock_bh(&socket_client->lock); kfree(socket_packet); @@ -335,8 +336,8 @@ static void bat_socket_add_packet(struct socket_client *socket_client, wake_up(&socket_client->queue_wait); } -void batadv_socket_receive_packet(struct icmp_packet_rr *icmp_packet, - size_t icmp_len) +void bat_socket_receive_packet(struct icmp_packet_rr *icmp_packet, + size_t icmp_len) { struct socket_client *hash = socket_client_hash[icmp_packet->uid]; diff --git a/trunk/net/batman-adv/icmp_socket.h b/trunk/net/batman-adv/icmp_socket.h index a62ab80df9bd..380ed4c2443a 100644 --- a/trunk/net/batman-adv/icmp_socket.h +++ b/trunk/net/batman-adv/icmp_socket.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. 
contributors: * * Marek Lindner * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #ifndef _NET_BATMAN_ADV_ICMP_SOCKET_H_ @@ -22,9 +24,9 @@ #define ICMP_SOCKET "socket" -void batadv_socket_init(void); -int batadv_socket_setup(struct bat_priv *bat_priv); -void batadv_socket_receive_packet(struct icmp_packet_rr *icmp_packet, - size_t icmp_len); +void bat_socket_init(void); +int bat_socket_setup(struct bat_priv *bat_priv); +void bat_socket_receive_packet(struct icmp_packet_rr *icmp_packet, + size_t icmp_len); #endif /* _NET_BATMAN_ADV_ICMP_SOCKET_H_ */ diff --git a/trunk/net/batman-adv/main.c b/trunk/net/batman-adv/main.c index d56d6b2e1924..46ba302d2d01 100644 --- a/trunk/net/batman-adv/main.c +++ b/trunk/net/batman-adv/main.c @@ -1,4 +1,5 @@ -/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #include "main.h" @@ -35,40 +37,38 @@ /* List manipulations on hardif_list have to be rtnl_lock()'ed, - * list traversals just rcu-locked - */ -struct list_head batadv_hardif_list; + * list traversals just rcu-locked */ +struct list_head hardif_list; static int (*recv_packet_handler[256])(struct sk_buff *, struct hard_iface *); -char batadv_routing_algo[20] = "BATMAN_IV"; +char bat_routing_algo[20] = "BATMAN_IV"; static struct hlist_head bat_algo_list; -unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; +unsigned char broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; -struct workqueue_struct *batadv_event_workqueue; +struct workqueue_struct *bat_event_workqueue; static void recv_handler_init(void); static int __init batman_init(void) { - INIT_LIST_HEAD(&batadv_hardif_list); + INIT_LIST_HEAD(&hardif_list); INIT_HLIST_HEAD(&bat_algo_list); recv_handler_init(); - batadv_iv_init(); + bat_iv_init(); /* the name should not be longer than 10 chars - see - * http://lwn.net/Articles/23634/ - */ - batadv_event_workqueue = create_singlethread_workqueue("bat_events"); + * http://lwn.net/Articles/23634/ */ + bat_event_workqueue = create_singlethread_workqueue("bat_events"); - if (!batadv_event_workqueue) + if (!bat_event_workqueue) return -ENOMEM; - batadv_socket_init(); - batadv_debugfs_init(); + bat_socket_init(); + debugfs_init(); - register_netdevice_notifier(&batadv_hard_if_notifier); + register_netdevice_notifier(&hard_if_notifier); pr_info("B.A.T.M.A.N. 
advanced %s (compatibility version %i) loaded\n", SOURCE_VERSION, COMPAT_VERSION); @@ -78,18 +78,18 @@ static int __init batman_init(void) static void __exit batman_exit(void) { - batadv_debugfs_destroy(); - unregister_netdevice_notifier(&batadv_hard_if_notifier); - batadv_hardif_remove_interfaces(); + debugfs_destroy(); + unregister_netdevice_notifier(&hard_if_notifier); + hardif_remove_interfaces(); - flush_workqueue(batadv_event_workqueue); - destroy_workqueue(batadv_event_workqueue); - batadv_event_workqueue = NULL; + flush_workqueue(bat_event_workqueue); + destroy_workqueue(bat_event_workqueue); + bat_event_workqueue = NULL; rcu_barrier(); } -int batadv_mesh_init(struct net_device *soft_iface) +int mesh_init(struct net_device *soft_iface) { struct bat_priv *bat_priv = netdev_priv(soft_iface); int ret; @@ -111,21 +111,21 @@ int batadv_mesh_init(struct net_device *soft_iface) INIT_LIST_HEAD(&bat_priv->tt_req_list); INIT_LIST_HEAD(&bat_priv->tt_roam_list); - ret = batadv_originator_init(bat_priv); + ret = originator_init(bat_priv); if (ret < 0) goto err; - ret = batadv_tt_init(bat_priv); + ret = tt_init(bat_priv); if (ret < 0) goto err; - batadv_tt_local_add(soft_iface, soft_iface->dev_addr, NULL_IFINDEX); + tt_local_add(soft_iface, soft_iface->dev_addr, NULL_IFINDEX); - ret = batadv_vis_init(bat_priv); + ret = vis_init(bat_priv); if (ret < 0) goto err; - ret = batadv_bla_init(bat_priv); + ret = bla_init(bat_priv); if (ret < 0) goto err; @@ -135,48 +135,48 @@ int batadv_mesh_init(struct net_device *soft_iface) return 0; err: - batadv_mesh_free(soft_iface); + mesh_free(soft_iface); return ret; } -void batadv_mesh_free(struct net_device *soft_iface) +void mesh_free(struct net_device *soft_iface) { struct bat_priv *bat_priv = netdev_priv(soft_iface); atomic_set(&bat_priv->mesh_state, MESH_DEACTIVATING); - batadv_purge_outstanding_packets(bat_priv, NULL); + purge_outstanding_packets(bat_priv, NULL); - batadv_vis_quit(bat_priv); + vis_quit(bat_priv); - batadv_gw_node_purge(bat_priv); - batadv_originator_free(bat_priv); + gw_node_purge(bat_priv); + originator_free(bat_priv); - batadv_tt_free(bat_priv); + tt_free(bat_priv); - batadv_bla_free(bat_priv); + bla_free(bat_priv); free_percpu(bat_priv->bat_counters); atomic_set(&bat_priv->mesh_state, MESH_INACTIVE); } -void batadv_inc_module_count(void) +void inc_module_count(void) { try_module_get(THIS_MODULE); } -void batadv_dec_module_count(void) +void dec_module_count(void) { module_put(THIS_MODULE); } -int batadv_is_my_mac(const uint8_t *addr) +int is_my_mac(const uint8_t *addr) { const struct hard_iface *hard_iface; rcu_read_lock(); - list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { if (hard_iface->if_status != IF_ACTIVE) continue; @@ -198,9 +198,8 @@ static int recv_unhandled_packet(struct sk_buff *skb, /* incoming packets with the batman ethertype received on any active hard * interface */ -int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *ptype, - struct net_device *orig_dev) +int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *orig_dev) { struct bat_priv *bat_priv; struct batman_ogm_packet *batman_ogm_packet; @@ -273,24 +272,24 @@ static void recv_handler_init(void) recv_packet_handler[i] = recv_unhandled_packet; /* batman icmp packet */ - recv_packet_handler[BAT_ICMP] = batadv_recv_icmp_packet; + recv_packet_handler[BAT_ICMP] = recv_icmp_packet; /* unicast packet 
*/ - recv_packet_handler[BAT_UNICAST] = batadv_recv_unicast_packet; + recv_packet_handler[BAT_UNICAST] = recv_unicast_packet; /* fragmented unicast packet */ - recv_packet_handler[BAT_UNICAST_FRAG] = batadv_recv_ucast_frag_packet; + recv_packet_handler[BAT_UNICAST_FRAG] = recv_ucast_frag_packet; /* broadcast packet */ - recv_packet_handler[BAT_BCAST] = batadv_recv_bcast_packet; + recv_packet_handler[BAT_BCAST] = recv_bcast_packet; /* vis packet */ - recv_packet_handler[BAT_VIS] = batadv_recv_vis_packet; + recv_packet_handler[BAT_VIS] = recv_vis_packet; /* Translation table query (request or response) */ - recv_packet_handler[BAT_TT_QUERY] = batadv_recv_tt_query; + recv_packet_handler[BAT_TT_QUERY] = recv_tt_query; /* Roaming advertisement */ - recv_packet_handler[BAT_ROAM_ADV] = batadv_recv_roam_adv; + recv_packet_handler[BAT_ROAM_ADV] = recv_roam_adv; } -int batadv_recv_handler_register(uint8_t packet_type, - int (*recv_handler)(struct sk_buff *, - struct hard_iface *)) +int recv_handler_register(uint8_t packet_type, + int (*recv_handler)(struct sk_buff *, + struct hard_iface *)) { if (recv_packet_handler[packet_type] != &recv_unhandled_packet) return -EBUSY; @@ -299,7 +298,7 @@ int batadv_recv_handler_register(uint8_t packet_type, return 0; } -void batadv_recv_handler_unregister(uint8_t packet_type) +void recv_handler_unregister(uint8_t packet_type) { recv_packet_handler[packet_type] = recv_unhandled_packet; } @@ -320,7 +319,7 @@ static struct bat_algo_ops *bat_algo_get(char *name) return bat_algo_ops; } -int batadv_algo_register(struct bat_algo_ops *bat_algo_ops) +int bat_algo_register(struct bat_algo_ops *bat_algo_ops) { struct bat_algo_ops *bat_algo_ops_tmp; int ret; @@ -354,7 +353,7 @@ int batadv_algo_register(struct bat_algo_ops *bat_algo_ops) return ret; } -int batadv_algo_select(struct bat_priv *bat_priv, char *name) +int bat_algo_select(struct bat_priv *bat_priv, char *name) { struct bat_algo_ops *bat_algo_ops; int ret = -EINVAL; @@ -370,7 +369,7 @@ int batadv_algo_select(struct bat_priv *bat_priv, char *name) return ret; } -int batadv_algo_seq_print_text(struct seq_file *seq, void *offset) +int bat_algo_seq_print_text(struct seq_file *seq, void *offset) { struct bat_algo_ops *bat_algo_ops; struct hlist_node *node; @@ -408,8 +407,8 @@ static const struct kernel_param_ops param_ops_ra = { }; static struct kparam_string __param_string_ra = { - .maxlen = sizeof(batadv_routing_algo), - .string = batadv_routing_algo, + .maxlen = sizeof(bat_routing_algo), + .string = bat_routing_algo, }; module_param_cb(routing_algo, ¶m_ops_ra, &__param_string_ra, 0644); diff --git a/trunk/net/batman-adv/main.h b/trunk/net/batman-adv/main.h index 4b06b7621e7a..6e0cbdc48321 100644 --- a/trunk/net/batman-adv/main.h +++ b/trunk/net/batman-adv/main.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. 
contributors: * * Marek Lindner, Simon Wunderlich * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #ifndef _NET_BATMAN_ADV_MAIN_H_ @@ -34,21 +36,19 @@ #define TQ_MAX_VALUE 255 #define JITTER 20 -/* Time To Live of broadcast messages */ + /* Time To Live of broadcast messages */ #define TTL 50 /* purge originators after time in seconds if no valid packet comes in - * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE - */ + * -> TODO: check influence on TQ_LOCAL_WINDOW_SIZE */ #define PURGE_TIMEOUT 200000 /* 200 seconds */ #define TT_LOCAL_TIMEOUT 3600000 /* in miliseconds */ #define TT_CLIENT_ROAM_TIMEOUT 600000 /* in miliseconds */ /* sliding packet range of received originator messages in sequence numbers - * (should be a multiple of our word size) - */ + * (should be a multiple of our word size) */ #define TQ_LOCAL_WINDOW_SIZE 64 -/* miliseconds we have to keep pending tt_req */ -#define TT_REQUEST_TIMEOUT 3000 +#define TT_REQUEST_TIMEOUT 3000 /* miliseconds we have to keep + * pending tt_req */ #define TQ_GLOBAL_WINDOW_SIZE 5 #define TQ_LOCAL_BIDRECT_SEND_MINIMUM 1 @@ -57,10 +57,8 @@ #define TT_OGM_APPEND_MAX 3 /* number of OGMs sent with the last tt diff */ -/* Time in which a client can roam at most ROAMING_MAX_COUNT times in - * miliseconds - */ -#define ROAMING_MAX_TIME 20000 +#define ROAMING_MAX_TIME 20000 /* Time in which a client can roam at most + * ROAMING_MAX_COUNT times in miliseconds*/ #define ROAMING_MAX_COUNT 5 #define NO_FLAGS 0 @@ -74,13 +72,11 @@ #define VIS_INTERVAL 5000 /* 5 seconds */ /* how much worse secondary interfaces may be to be considered as bonding - * candidates - */ + * candidates */ #define BONDING_TQ_THRESHOLD 50 /* should not be bigger than 512 bytes or change the size of - * forw_packet->direct_link_flags - */ + * forw_packet->direct_link_flags */ #define MAX_AGGREGATION_BYTES 512 #define MAX_AGGREGATION_MS 100 @@ -149,36 +145,34 @@ enum dbg_level { #include #include "types.h" -extern char batadv_routing_algo[]; -extern struct list_head batadv_hardif_list; - -extern unsigned char batadv_broadcast_addr[]; -extern struct workqueue_struct *batadv_event_workqueue; - -int batadv_mesh_init(struct net_device *soft_iface); -void batadv_mesh_free(struct net_device *soft_iface); -void batadv_inc_module_count(void); -void batadv_dec_module_count(void); -int batadv_is_my_mac(const uint8_t *addr); -int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, - struct packet_type *ptype, - struct net_device *orig_dev); -int batadv_recv_handler_register(uint8_t packet_type, - int (*recv_handler)(struct sk_buff *, - struct hard_iface *)); -void batadv_recv_handler_unregister(uint8_t packet_type); -int batadv_algo_register(struct bat_algo_ops *bat_algo_ops); -int batadv_algo_select(struct bat_priv *bat_priv, char *name); -int batadv_algo_seq_print_text(struct seq_file *seq, void *offset); +extern char bat_routing_algo[]; +extern struct list_head hardif_list; + +extern unsigned char broadcast_addr[]; +extern struct workqueue_struct *bat_event_workqueue; + +int mesh_init(struct net_device *soft_iface); +void mesh_free(struct net_device *soft_iface); +void inc_module_count(void); +void dec_module_count(void); +int is_my_mac(const uint8_t *addr); +int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, + struct packet_type *ptype, struct net_device *orig_dev); +int recv_handler_register(uint8_t packet_type, + int 
(*recv_handler)(struct sk_buff *, + struct hard_iface *)); +void recv_handler_unregister(uint8_t packet_type); +int bat_algo_register(struct bat_algo_ops *bat_algo_ops); +int bat_algo_select(struct bat_priv *bat_priv, char *name); +int bat_algo_seq_print_text(struct seq_file *seq, void *offset); #ifdef CONFIG_BATMAN_ADV_DEBUG -int batadv_debug_log(struct bat_priv *bat_priv, const char *fmt, ...) -__printf(2, 3); +int debug_log(struct bat_priv *bat_priv, const char *fmt, ...) __printf(2, 3); #define bat_dbg(type, bat_priv, fmt, arg...) \ do { \ if (atomic_read(&bat_priv->log_level) & type) \ - batadv_debug_log(bat_priv, fmt, ## arg);\ + debug_log(bat_priv, fmt, ## arg); \ } \ while (0) #else /* !CONFIG_BATMAN_ADV_DEBUG */ @@ -205,16 +199,19 @@ static inline void bat_dbg(int type __always_unused, pr_err("%s: " fmt, _netdev->name, ## arg); \ } while (0) -/* returns 1 if they are the same ethernet addr +/** + * returns 1 if they are the same ethernet addr * * note: can't use compare_ether_addr() as it requires aligned memory */ + static inline int compare_eth(const void *data1, const void *data2) { return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0); } -/* has_timed_out - compares current time (jiffies) and timestamp + timeout +/** + * has_timed_out - compares current time (jiffies) and timestamp + timeout * @timestamp: base value to compare with (in jiffies) * @timeout: added to base value before comparing (in milliseconds) * @@ -238,8 +235,7 @@ static inline bool has_timed_out(unsigned long timestamp, unsigned int timeout) * - when adding nothing - it is neither a predecessor nor a successor * - before adding more than 127 to the starting value - it is a predecessor, * - when adding 128 - it is neither a predecessor nor a successor, - * - after adding more than 127 to the starting value - it is a successor - */ + * - after adding more than 127 to the starting value - it is a successor */ #define seq_before(x, y) ({typeof(x) _d1 = (x); \ typeof(y) _d2 = (y); \ typeof(x) _dummy = (_d1 - _d2); \ diff --git a/trunk/net/batman-adv/originator.c b/trunk/net/batman-adv/originator.c index 86e7e082c2bc..cf83c5422e9a 100644 --- a/trunk/net/batman-adv/originator.c +++ b/trunk/net/batman-adv/originator.c @@ -1,4 +1,5 @@ -/* Copyright (C) 2009-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #include "main.h" @@ -33,7 +35,7 @@ static void purge_orig(struct work_struct *work); static void start_purge_timer(struct bat_priv *bat_priv) { INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig); - queue_delayed_work(batadv_event_workqueue, + queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, msecs_to_jiffies(1000)); } @@ -45,12 +47,12 @@ static int compare_orig(const struct hlist_node *node, const void *data2) return (memcmp(data1, data2, ETH_ALEN) == 0 ? 
1 : 0); } -int batadv_originator_init(struct bat_priv *bat_priv) +int originator_init(struct bat_priv *bat_priv) { if (bat_priv->orig_hash) return 0; - bat_priv->orig_hash = batadv_hash_new(1024); + bat_priv->orig_hash = hash_new(1024); if (!bat_priv->orig_hash) goto err; @@ -62,14 +64,14 @@ int batadv_originator_init(struct bat_priv *bat_priv) return -ENOMEM; } -void batadv_neigh_node_free_ref(struct neigh_node *neigh_node) +void neigh_node_free_ref(struct neigh_node *neigh_node) { if (atomic_dec_and_test(&neigh_node->refcount)) kfree_rcu(neigh_node, rcu); } /* increases the refcounter of a found router */ -struct neigh_node *batadv_orig_node_get_router(struct orig_node *orig_node) +struct neigh_node *orig_node_get_router(struct orig_node *orig_node) { struct neigh_node *router; @@ -124,21 +126,21 @@ static void orig_node_free_rcu(struct rcu_head *rcu) list_for_each_entry_safe(neigh_node, tmp_neigh_node, &orig_node->bond_list, bonding_list) { list_del_rcu(&neigh_node->bonding_list); - batadv_neigh_node_free_ref(neigh_node); + neigh_node_free_ref(neigh_node); } /* for all neighbors towards this originator ... */ hlist_for_each_entry_safe(neigh_node, node, node_tmp, &orig_node->neigh_list, list) { hlist_del_rcu(&neigh_node->list); - batadv_neigh_node_free_ref(neigh_node); + neigh_node_free_ref(neigh_node); } spin_unlock_bh(&orig_node->neigh_list_lock); - batadv_frag_list_free(&orig_node->frag_list); - batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, - "originator timed out"); + frag_list_free(&orig_node->frag_list); + tt_global_del_orig(orig_node->bat_priv, orig_node, + "originator timed out"); kfree(orig_node->tt_buff); kfree(orig_node->bcast_own); @@ -146,13 +148,13 @@ static void orig_node_free_rcu(struct rcu_head *rcu) kfree(orig_node); } -void batadv_orig_node_free_ref(struct orig_node *orig_node) +void orig_node_free_ref(struct orig_node *orig_node) { if (atomic_dec_and_test(&orig_node->refcount)) call_rcu(&orig_node->rcu, orig_node_free_rcu); } -void batadv_originator_free(struct bat_priv *bat_priv) +void originator_free(struct bat_priv *bat_priv) { struct hashtable_t *hash = bat_priv->orig_hash; struct hlist_node *node, *node_tmp; @@ -177,19 +179,17 @@ void batadv_originator_free(struct bat_priv *bat_priv) head, hash_entry) { hlist_del_rcu(node); - batadv_orig_node_free_ref(orig_node); + orig_node_free_ref(orig_node); } spin_unlock_bh(list_lock); } - batadv_hash_destroy(hash); + hash_destroy(hash); } /* this function finds or creates an originator entry for the given - * address if it does not exits - */ -struct orig_node *batadv_get_orig_node(struct bat_priv *bat_priv, - const uint8_t *addr) + * address if it does not exits */ +struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr) { struct orig_node *orig_node; int size; @@ -306,8 +306,8 @@ static bool purge_orig_neighbors(struct bat_priv *bat_priv, neigh_purged = true; hlist_del_rcu(&neigh_node->list); - batadv_bonding_candidate_del(orig_node, neigh_node); - batadv_neigh_node_free_ref(neigh_node); + bonding_candidate_del(orig_node, neigh_node); + neigh_node_free_ref(neigh_node); } else { if ((!*best_neigh_node) || (neigh_node->tq_avg > (*best_neigh_node)->tq_avg)) @@ -333,8 +333,7 @@ static bool purge_orig_node(struct bat_priv *bat_priv, } else { if (purge_orig_neighbors(bat_priv, orig_node, &best_neigh_node)) - batadv_update_route(bat_priv, orig_node, - best_neigh_node); + update_route(bat_priv, orig_node, best_neigh_node); } return false; @@ -362,22 +361,21 @@ static void _purge_orig(struct 
bat_priv *bat_priv) head, hash_entry) { if (purge_orig_node(bat_priv, orig_node)) { if (orig_node->gw_flags) - batadv_gw_node_delete(bat_priv, - orig_node); + gw_node_delete(bat_priv, orig_node); hlist_del_rcu(node); - batadv_orig_node_free_ref(orig_node); + orig_node_free_ref(orig_node); continue; } if (has_timed_out(orig_node->last_frag_packet, FRAG_TIMEOUT)) - batadv_frag_list_free(&orig_node->frag_list); + frag_list_free(&orig_node->frag_list); } spin_unlock_bh(list_lock); } - batadv_gw_node_purge(bat_priv); - batadv_gw_election(bat_priv); + gw_node_purge(bat_priv); + gw_election(bat_priv); } static void purge_orig(struct work_struct *work) @@ -391,12 +389,12 @@ static void purge_orig(struct work_struct *work) start_purge_timer(bat_priv); } -void batadv_purge_orig_ref(struct bat_priv *bat_priv) +void purge_orig_ref(struct bat_priv *bat_priv) { _purge_orig(bat_priv); } -int batadv_orig_seq_print_text(struct seq_file *seq, void *offset) +int orig_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; struct bat_priv *bat_priv = netdev_priv(net_dev); @@ -440,7 +438,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset) rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { - neigh_node = batadv_orig_node_get_router(orig_node); + neigh_node = orig_node_get_router(orig_node); if (!neigh_node) continue; @@ -469,7 +467,7 @@ int batadv_orig_seq_print_text(struct seq_file *seq, void *offset) batman_count++; next: - batadv_neigh_node_free_ref(neigh_node); + neigh_node_free_ref(neigh_node); } rcu_read_unlock(); } @@ -509,7 +507,7 @@ static int orig_node_add_if(struct orig_node *orig_node, int max_if_num) return 0; } -int batadv_orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num) +int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num) { struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); struct hashtable_t *hash = bat_priv->orig_hash; @@ -520,8 +518,7 @@ int batadv_orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num) int ret; /* resize all orig nodes because orig_node->bcast_own(_sum) depend on - * if_num - */ + * if_num */ for (i = 0; i < hash->size; i++) { head = &hash->table[i]; @@ -592,7 +589,7 @@ static int orig_node_del_if(struct orig_node *orig_node, return 0; } -int batadv_orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num) +int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num) { struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); struct hashtable_t *hash = bat_priv->orig_hash; @@ -604,8 +601,7 @@ int batadv_orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num) int ret; /* resize all orig nodes because orig_node->bcast_own(_sum) depend on - * if_num - */ + * if_num */ for (i = 0; i < hash->size; i++) { head = &hash->table[i]; @@ -624,7 +620,7 @@ int batadv_orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num) /* renumber remaining batman interfaces _inside_ of orig_hash_lock */ rcu_read_lock(); - list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) { + list_for_each_entry_rcu(hard_iface_tmp, &hardif_list, list) { if (hard_iface_tmp->if_status == IF_NOT_IN_USE) continue; diff --git a/trunk/net/batman-adv/originator.h b/trunk/net/batman-adv/originator.h index a72171997056..f74d0d693359 100644 --- a/trunk/net/batman-adv/originator.h +++ b/trunk/net/batman-adv/originator.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2007-2012 B.A.T.M.A.N. 
contributors: +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #ifndef _NET_BATMAN_ADV_ORIGINATOR_H_ @@ -22,25 +24,23 @@ #include "hash.h" -int batadv_originator_init(struct bat_priv *bat_priv); -void batadv_originator_free(struct bat_priv *bat_priv); -void batadv_purge_orig_ref(struct bat_priv *bat_priv); -void batadv_orig_node_free_ref(struct orig_node *orig_node); -struct orig_node *batadv_get_orig_node(struct bat_priv *bat_priv, - const uint8_t *addr); +int originator_init(struct bat_priv *bat_priv); +void originator_free(struct bat_priv *bat_priv); +void purge_orig_ref(struct bat_priv *bat_priv); +void orig_node_free_ref(struct orig_node *orig_node); +struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr); struct neigh_node *batadv_neigh_node_new(struct hard_iface *hard_iface, const uint8_t *neigh_addr, uint32_t seqno); -void batadv_neigh_node_free_ref(struct neigh_node *neigh_node); -struct neigh_node *batadv_orig_node_get_router(struct orig_node *orig_node); -int batadv_orig_seq_print_text(struct seq_file *seq, void *offset); -int batadv_orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num); -int batadv_orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num); +void neigh_node_free_ref(struct neigh_node *neigh_node); +struct neigh_node *orig_node_get_router(struct orig_node *orig_node); +int orig_seq_print_text(struct seq_file *seq, void *offset); +int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num); +int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num); -/* hashfunction to choose an entry in a hash table of given size - * hash algorithm from http://en.wikipedia.org/wiki/Hash_table - */ +/* hashfunction to choose an entry in a hash table of given size */ +/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */ static inline uint32_t choose_orig(const void *data, uint32_t size) { const unsigned char *key = data; diff --git a/trunk/net/batman-adv/packet.h b/trunk/net/batman-adv/packet.h index c90219cd648e..033d99490e82 100644 --- a/trunk/net/batman-adv/packet.h +++ b/trunk/net/batman-adv/packet.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #ifndef _NET_BATMAN_ADV_PACKET_H_ @@ -79,8 +81,7 @@ enum tt_query_flags { /* TT_CLIENT flags. 
* Flags from 1 to 1 << 7 are sent on the wire, while flags from 1 << 8 to - * 1 << 15 are used for local computation only - */ + * 1 << 15 are used for local computation only */ enum tt_client_flags { TT_CLIENT_DEL = 1 << 0, TT_CLIENT_ROAM = 1 << 1, @@ -141,8 +142,7 @@ struct icmp_packet { #define BAT_RR_LEN 16 /* icmp_packet_rr must start with all fields from imcp_packet - * as this is assumed by code that handles ICMP packets - */ + * as this is assumed by code that handles ICMP packets */ struct icmp_packet_rr { struct batman_header header; uint8_t msg_type; /* see ICMP message types above */ @@ -192,8 +192,7 @@ struct tt_query_packet { struct batman_header header; /* the flag field is a combination of: * - TT_REQUEST or TT_RESPONSE - * - TT_FULL_TABLE - */ + * - TT_FULL_TABLE */ uint8_t flags; uint8_t dst[ETH_ALEN]; uint8_t src[ETH_ALEN]; @@ -201,15 +200,13 @@ struct tt_query_packet { * if TT_REQUEST: ttvn that triggered the * request * if TT_RESPONSE: new ttvn for the src - * orig_node - */ + * orig_node */ uint8_t ttvn; /* tt_data field is: * if TT_REQUEST: crc associated with the * ttvn - * if TT_RESPONSE: table_size - */ - __be16 tt_data; + * if TT_RESPONSE: table_size */ + __be16 tt_data; } __packed; struct roam_adv_packet { diff --git a/trunk/net/batman-adv/ring_buffer.c b/trunk/net/batman-adv/ring_buffer.c index aff1ca2990f1..fd63951d118d 100644 --- a/trunk/net/batman-adv/ring_buffer.c +++ b/trunk/net/batman-adv/ring_buffer.c @@ -1,4 +1,5 @@ -/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: * * Marek Lindner * @@ -15,19 +16,19 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #include "main.h" #include "ring_buffer.h" -void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, - uint8_t value) +void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value) { lq_recv[*lq_index] = value; *lq_index = (*lq_index + 1) % TQ_GLOBAL_WINDOW_SIZE; } -uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[]) +uint8_t ring_buffer_avg(const uint8_t lq_recv[]) { const uint8_t *ptr; uint16_t count = 0, i = 0, sum = 0; diff --git a/trunk/net/batman-adv/ring_buffer.h b/trunk/net/batman-adv/ring_buffer.h index fda8c17df273..8b58bd82767d 100644 --- a/trunk/net/batman-adv/ring_buffer.h +++ b/trunk/net/batman-adv/ring_buffer.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: * * Marek Lindner * @@ -15,13 +16,13 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #ifndef _NET_BATMAN_ADV_RING_BUFFER_H_ #define _NET_BATMAN_ADV_RING_BUFFER_H_ -void batadv_ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, - uint8_t value); -uint8_t batadv_ring_buffer_avg(const uint8_t lq_recv[]); +void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index, uint8_t value); +uint8_t ring_buffer_avg(const uint8_t lq_recv[]); #endif /* _NET_BATMAN_ADV_RING_BUFFER_H_ */ diff --git a/trunk/net/batman-adv/routing.c b/trunk/net/batman-adv/routing.c index 79f63cf11be4..9cfd23c6d64a 100644 --- a/trunk/net/batman-adv/routing.c +++ b/trunk/net/batman-adv/routing.c @@ -1,4 +1,5 @@ -/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. 
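The ring_buffer helpers touched in the ring_buffer.c hunk above keep a small circular window of recently received link-quality (TQ) samples per neighbour: ring_buffer_set() overwrites the oldest slot and advances the write index modulo the window size, and ring_buffer_avg() reduces the window to one smoothed value. A minimal userspace sketch of that circular-index behaviour follows; the window size of 5 and the plain arithmetic mean are illustrative assumptions only (the kernel uses TQ_GLOBAL_WINDOW_SIZE from main.h, and the body of its ring_buffer_avg() is not shown in this hunk).

    #include <stdint.h>
    #include <stdio.h>

    #define WINDOW_SIZE 5   /* stand-in for TQ_GLOBAL_WINDOW_SIZE */

    /* overwrite the oldest sample and advance the write index */
    static void ring_buffer_set(uint8_t lq_recv[], uint8_t *lq_index,
                                uint8_t value)
    {
            lq_recv[*lq_index] = value;
            *lq_index = (*lq_index + 1) % WINDOW_SIZE;
    }

    /* illustrative smoothing: a plain mean over the whole window */
    static uint8_t ring_buffer_avg(const uint8_t lq_recv[])
    {
            uint16_t sum = 0;
            int i;

            for (i = 0; i < WINDOW_SIZE; i++)
                    sum += lq_recv[i];

            return (uint8_t)(sum / WINDOW_SIZE);
    }

    int main(void)
    {
            uint8_t lq[WINDOW_SIZE] = { 0 };
            uint8_t idx = 0;
            uint8_t samples[] = { 255, 250, 240, 245, 230, 235 };
            unsigned int i;

            for (i = 0; i < sizeof(samples); i++)
                    ring_buffer_set(lq, &idx, samples[i]);

            /* the sixth sample has overwritten the first one */
            printf("avg tq: %u\n", ring_buffer_avg(lq));
            return 0;
    }
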
contributors: * * Marek Lindner, Simon Wunderlich * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #include "main.h" @@ -32,7 +34,7 @@ static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); -void batadv_slide_own_bcast_window(struct hard_iface *hard_iface) +void slide_own_bcast_window(struct hard_iface *hard_iface) { struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); struct hashtable_t *hash = bat_priv->orig_hash; @@ -52,7 +54,7 @@ void batadv_slide_own_bcast_window(struct hard_iface *hard_iface) word_index = hard_iface->if_num * NUM_WORDS; word = &(orig_node->bcast_own[word_index]); - batadv_bit_get_packet(bat_priv, word, 1, 0); + bit_get_packet(bat_priv, word, 1, 0); orig_node->bcast_own_sum[hard_iface->if_num] = bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE); spin_unlock_bh(&orig_node->ogm_cnt_lock); @@ -67,14 +69,14 @@ static void _update_route(struct bat_priv *bat_priv, { struct neigh_node *curr_router; - curr_router = batadv_orig_node_get_router(orig_node); + curr_router = orig_node_get_router(orig_node); /* route deleted */ if ((curr_router) && (!neigh_node)) { bat_dbg(DBG_ROUTES, bat_priv, "Deleting route towards: %pM\n", orig_node->orig); - batadv_tt_global_del_orig(bat_priv, orig_node, - "Deleted route towards originator"); + tt_global_del_orig(bat_priv, orig_node, + "Deleted route towards originator"); /* route added */ } else if ((!curr_router) && (neigh_node)) { @@ -91,7 +93,7 @@ static void _update_route(struct bat_priv *bat_priv, } if (curr_router) - batadv_neigh_node_free_ref(curr_router); + neigh_node_free_ref(curr_router); /* increase refcount of new best neighbor */ if (neigh_node && !atomic_inc_not_zero(&neigh_node->refcount)) @@ -103,30 +105,30 @@ static void _update_route(struct bat_priv *bat_priv, /* decrease refcount of previous best neighbor */ if (curr_router) - batadv_neigh_node_free_ref(curr_router); + neigh_node_free_ref(curr_router); } -void batadv_update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, - struct neigh_node *neigh_node) +void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, + struct neigh_node *neigh_node) { struct neigh_node *router = NULL; if (!orig_node) goto out; - router = batadv_orig_node_get_router(orig_node); + router = orig_node_get_router(orig_node); if (router != neigh_node) _update_route(bat_priv, orig_node, neigh_node); out: if (router) - batadv_neigh_node_free_ref(router); + neigh_node_free_ref(router); } /* caller must hold the neigh_list_lock */ -void batadv_bonding_candidate_del(struct orig_node *orig_node, - struct neigh_node *neigh_node) +void bonding_candidate_del(struct orig_node *orig_node, + struct neigh_node *neigh_node) { /* this neighbor is not part of our candidate list */ if (list_empty(&neigh_node->bonding_list)) @@ -134,15 +136,15 @@ void batadv_bonding_candidate_del(struct orig_node *orig_node, list_del_rcu(&neigh_node->bonding_list); INIT_LIST_HEAD(&neigh_node->bonding_list); - batadv_neigh_node_free_ref(neigh_node); + neigh_node_free_ref(neigh_node); atomic_dec(&orig_node->bond_candidates); out: return; } -void batadv_bonding_candidate_add(struct orig_node *orig_node, - struct neigh_node *neigh_node) +void bonding_candidate_add(struct orig_node *orig_node, + struct neigh_node *neigh_node) { struct hlist_node *node; struct neigh_node *tmp_neigh_node, *router = NULL; @@ -155,7 +157,7 @@ void batadv_bonding_candidate_add(struct 
orig_node *orig_node, neigh_node->orig_node->primary_addr)) goto candidate_del; - router = batadv_orig_node_get_router(orig_node); + router = orig_node_get_router(orig_node); if (!router) goto candidate_del; @@ -163,7 +165,8 @@ void batadv_bonding_candidate_add(struct orig_node *orig_node, if (neigh_node->tq_avg < router->tq_avg - BONDING_TQ_THRESHOLD) goto candidate_del; - /* check if we have another candidate with the same mac address or + /** + * check if we have another candidate with the same mac address or * interface. If we do, we won't select this candidate because of * possible interference. */ @@ -174,8 +177,7 @@ void batadv_bonding_candidate_add(struct orig_node *orig_node, continue; /* we only care if the other candidate is even - * considered as candidate. - */ + * considered as candidate. */ if (list_empty(&tmp_neigh_node->bonding_list)) continue; @@ -202,20 +204,19 @@ void batadv_bonding_candidate_add(struct orig_node *orig_node, goto out; candidate_del: - batadv_bonding_candidate_del(orig_node, neigh_node); + bonding_candidate_del(orig_node, neigh_node); out: spin_unlock_bh(&orig_node->neigh_list_lock); if (router) - batadv_neigh_node_free_ref(router); + neigh_node_free_ref(router); } /* copy primary address for bonding */ -void -batadv_bonding_save_primary(const struct orig_node *orig_node, - struct orig_node *orig_neigh_node, - const struct batman_ogm_packet *batman_ogm_packet) +void bonding_save_primary(const struct orig_node *orig_node, + struct orig_node *orig_neigh_node, + const struct batman_ogm_packet *batman_ogm_packet) { if (!(batman_ogm_packet->flags & PRIMARIES_FIRST_HOP)) return; @@ -228,8 +229,8 @@ batadv_bonding_save_primary(const struct orig_node *orig_node, * 0 if the packet is to be accepted * 1 if the packet is to be ignored. 
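window_protected(), defined just below, implements the protection-window idea behind that return-value comment: a sequence-number difference far outside the expected range usually means the neighbour rebooted, so the window is reset only after a hold-down period instead of immediately. The hunk shows only the boundary check, so this is a simplified userspace rendering of the idea; the window, range and timeout constants are illustrative assumptions, not the kernel's definitions.

    #include <stdbool.h>
    #include <stdint.h>
    #include <time.h>

    #define TQ_LOCAL_WINDOW_SIZE   64      /* assumed for illustration */
    #define EXPECTED_SEQNO_RANGE   65536   /* assumed for illustration */
    #define RESET_PROTECTION_SEC   30      /* assumed hold-down time   */

    /* returns true if the packet should be ignored (protection active),
     * false if the sequence window may be reset and the packet accepted */
    static bool window_protected(int32_t seq_num_diff, time_t *last_reset)
    {
            time_t now = time(NULL);

            if (seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE ||
                seq_num_diff >= EXPECTED_SEQNO_RANGE) {
                    if (now - *last_reset < RESET_PROTECTION_SEC)
                            return true;    /* still protected: drop it */

                    *last_reset = now;      /* host probably restarted  */
            }

            return false;
    }
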
*/ -int batadv_window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff, - unsigned long *last_reset) +int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff, + unsigned long *last_reset) { if ((seq_num_diff <= -TQ_LOCAL_WINDOW_SIZE) || (seq_num_diff >= EXPECTED_SEQNO_RANGE)) { @@ -244,9 +245,9 @@ int batadv_window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff, return 0; } -bool batadv_check_management_packet(struct sk_buff *skb, - struct hard_iface *hard_iface, - int header_len) +bool check_management_packet(struct sk_buff *skb, + struct hard_iface *hard_iface, + int header_len) { struct ethhdr *ethhdr; @@ -288,7 +289,7 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv, /* add data to device queue */ if (icmp_packet->msg_type != ECHO_REQUEST) { - batadv_socket_receive_packet(icmp_packet, icmp_len); + bat_socket_receive_packet(icmp_packet, icmp_len); goto out; } @@ -302,7 +303,7 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv, if (!orig_node) goto out; - router = batadv_orig_node_get_router(orig_node); + router = orig_node_get_router(orig_node); if (!router) goto out; @@ -317,16 +318,16 @@ static int recv_my_icmp_packet(struct bat_priv *bat_priv, icmp_packet->msg_type = ECHO_REPLY; icmp_packet->header.ttl = TTL; - batadv_send_skb_packet(skb, router->if_incoming, router->addr); + send_skb_packet(skb, router->if_incoming, router->addr); ret = NET_RX_SUCCESS; out: if (primary_if) hardif_free_ref(primary_if); if (router) - batadv_neigh_node_free_ref(router); + neigh_node_free_ref(router); if (orig_node) - batadv_orig_node_free_ref(orig_node); + orig_node_free_ref(orig_node); return ret; } @@ -357,7 +358,7 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv, if (!orig_node) goto out; - router = batadv_orig_node_get_router(orig_node); + router = orig_node_get_router(orig_node); if (!router) goto out; @@ -372,21 +373,21 @@ static int recv_icmp_ttl_exceeded(struct bat_priv *bat_priv, icmp_packet->msg_type = TTL_EXCEEDED; icmp_packet->header.ttl = TTL; - batadv_send_skb_packet(skb, router->if_incoming, router->addr); + send_skb_packet(skb, router->if_incoming, router->addr); ret = NET_RX_SUCCESS; out: if (primary_if) hardif_free_ref(primary_if); if (router) - batadv_neigh_node_free_ref(router); + neigh_node_free_ref(router); if (orig_node) - batadv_orig_node_free_ref(orig_node); + orig_node_free_ref(orig_node); return ret; } -int batadv_recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if) +int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if) { struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct icmp_packet_rr *icmp_packet; @@ -396,7 +397,9 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if) int hdr_size = sizeof(struct icmp_packet); int ret = NET_RX_DROP; - /* we truncate all incoming icmp packets if they don't match our size */ + /** + * we truncate all incoming icmp packets if they don't match our size + */ if (skb->len >= sizeof(struct icmp_packet_rr)) hdr_size = sizeof(struct icmp_packet_rr); @@ -415,7 +418,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if) goto out; /* not for me */ - if (!batadv_is_my_mac(ethhdr->h_dest)) + if (!is_my_mac(ethhdr->h_dest)) goto out; icmp_packet = (struct icmp_packet_rr *)skb->data; @@ -429,7 +432,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if) } /* packet for me */ - if (batadv_is_my_mac(icmp_packet->dst)) + if (is_my_mac(icmp_packet->dst)) return 
recv_my_icmp_packet(bat_priv, skb, hdr_size); /* TTL exceeded */ @@ -441,7 +444,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if) if (!orig_node) goto out; - router = batadv_orig_node_get_router(orig_node); + router = orig_node_get_router(orig_node); if (!router) goto out; @@ -455,14 +458,14 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if) icmp_packet->header.ttl--; /* route it */ - batadv_send_skb_packet(skb, router->if_incoming, router->addr); + send_skb_packet(skb, router->if_incoming, router->addr); ret = NET_RX_SUCCESS; out: if (router) - batadv_neigh_node_free_ref(router); + neigh_node_free_ref(router); if (orig_node) - batadv_orig_node_free_ref(orig_node); + orig_node_free_ref(orig_node); return ret; } @@ -470,8 +473,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if) * robin fashion over the remaining interfaces. * * This method rotates the bonding list and increases the - * returned router's refcount. - */ + * returned router's refcount. */ static struct neigh_node *find_bond_router(struct orig_node *primary_orig, const struct hard_iface *recv_if) { @@ -504,12 +506,10 @@ static struct neigh_node *find_bond_router(struct orig_node *primary_orig, goto out; /* selected should point to the next element - * after the current router - */ + * after the current router */ spin_lock_bh(&primary_orig->neigh_list_lock); /* this is a list_move(), which unfortunately - * does not exist as rcu version - */ + * does not exist as rcu version */ list_del_rcu(&primary_orig->bond_list); list_add_rcu(&primary_orig->bond_list, &router->bonding_list); @@ -524,8 +524,7 @@ static struct neigh_node *find_bond_router(struct orig_node *primary_orig, * remaining candidates which are not using * this interface. * - * Increases the returned router's refcount - */ + * Increases the returned router's refcount */ static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig, const struct hard_iface *recv_if) { @@ -546,21 +545,19 @@ static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig, continue; /* if we don't have a router yet - * or this one is better, choose it. - */ + * or this one is better, choose it. */ if ((!router) || (tmp_neigh_node->tq_avg > router->tq_avg)) { /* decrement refcount of - * previously selected router - */ + * previously selected router */ if (router) - batadv_neigh_node_free_ref(router); + neigh_node_free_ref(router); router = tmp_neigh_node; atomic_inc_not_zero(&router->refcount); } - batadv_neigh_node_free_ref(tmp_neigh_node); + neigh_node_free_ref(tmp_neigh_node); } /* use the first candidate if nothing was found. 
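find_ifalter_router() above walks the bonding candidates and keeps the best-TQ neighbour that is not reached over the interface the packet arrived on, falling back to the first candidate when no alternate interface exists. Stripped of the RCU iteration and reference counting of the real code, and with an invented candidate struct standing in for struct neigh_node, the selection reduces to a sketch like this:

    #include <stddef.h>
    #include <stdint.h>

    /* invented, minimal stand-in for the bonding candidates */
    struct candidate {
            int if_index;       /* interface the neighbour is reached on */
            uint8_t tq_avg;     /* averaged link quality */
    };

    /* pick the best candidate that avoids the receiving interface;
     * use the first candidate if nothing was found */
    static const struct candidate *
    find_ifalter_candidate(const struct candidate *cand, size_t n, int recv_if)
    {
            const struct candidate *best_alt = NULL;
            size_t i;

            for (i = 0; i < n; i++) {
                    if (cand[i].if_index == recv_if)
                            continue;

                    if (!best_alt || cand[i].tq_avg > best_alt->tq_avg)
                            best_alt = &cand[i];
            }

            if (best_alt)
                    return best_alt;

            return n ? &cand[0] : NULL;
    }
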
*/ @@ -572,7 +569,7 @@ static struct neigh_node *find_ifalter_router(struct orig_node *primary_orig, return router; } -int batadv_recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if) +int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if) { struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct tt_query_packet *tt_query; @@ -604,9 +601,8 @@ int batadv_recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if) batadv_inc_counter(bat_priv, BAT_CNT_TT_REQUEST_RX); /* If we cannot provide an answer the tt_request is - * forwarded - */ - if (!batadv_send_tt_response(bat_priv, tt_query)) { + * forwarded */ + if (!send_tt_response(bat_priv, tt_query)) { bat_dbg(DBG_TT, bat_priv, "Routing TT_REQUEST to %pM [%c]\n", tt_query->dst, @@ -617,23 +613,22 @@ int batadv_recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if) case TT_RESPONSE: batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_RX); - if (batadv_is_my_mac(tt_query->dst)) { + if (is_my_mac(tt_query->dst)) { /* packet needs to be linearized to access the TT - * changes - */ + * changes */ if (skb_linearize(skb) < 0) goto out; /* skb_linearize() possibly changed skb->data */ tt_query = (struct tt_query_packet *)skb->data; - tt_size = batadv_tt_len(ntohs(tt_query->tt_data)); + tt_size = tt_len(ntohs(tt_query->tt_data)); /* Ensure we have all the claimed data */ if (unlikely(skb_headlen(skb) < sizeof(struct tt_query_packet) + tt_size)) goto out; - batadv_handle_tt_response(bat_priv, tt_query); + handle_tt_response(bat_priv, tt_query); } else { bat_dbg(DBG_TT, bat_priv, "Routing TT_RESPONSE to %pM [%c]\n", @@ -649,7 +644,7 @@ int batadv_recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if) return NET_RX_DROP; } -int batadv_recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if) +int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if) { struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct roam_adv_packet *roam_adv_packet; @@ -674,14 +669,14 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if) roam_adv_packet = (struct roam_adv_packet *)skb->data; - if (!batadv_is_my_mac(roam_adv_packet->dst)) + if (!is_my_mac(roam_adv_packet->dst)) return route_unicast_packet(skb, recv_if); /* check if it is a backbone gateway. we don't accept * roaming advertisement from it, as it has the same * entries as we have. */ - if (batadv_bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src)) + if (bla_is_backbone_gw_orig(bat_priv, roam_adv_packet->src)) goto out; orig_node = orig_hash_find(bat_priv, roam_adv_packet->src); @@ -692,17 +687,15 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if) "Received ROAMING_ADV from %pM (client %pM)\n", roam_adv_packet->src, roam_adv_packet->client); - batadv_tt_global_add(bat_priv, orig_node, roam_adv_packet->client, - atomic_read(&orig_node->last_ttvn) + 1, true, - false); + tt_global_add(bat_priv, orig_node, roam_adv_packet->client, + atomic_read(&orig_node->last_ttvn) + 1, true, false); /* Roaming phase starts: I have new information but the ttvn has not * been incremented yet. This flag will make me check all the incoming - * packets for the correct destination. - */ + * packets for the correct destination. 
*/ bat_priv->tt_poss_change = true; - batadv_orig_node_free_ref(orig_node); + orig_node_free_ref(orig_node); out: /* returning NET_RX_DROP will make the caller function kfree the skb */ return NET_RX_DROP; @@ -710,11 +703,10 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if) /* find a suitable router for this originator, and use * bonding if possible. increases the found neighbors - * refcount. - */ -struct neigh_node *batadv_find_router(struct bat_priv *bat_priv, - struct orig_node *orig_node, - const struct hard_iface *recv_if) + * refcount.*/ +struct neigh_node *find_router(struct bat_priv *bat_priv, + struct orig_node *orig_node, + const struct hard_iface *recv_if) { struct orig_node *primary_orig_node; struct orig_node *router_orig; @@ -725,13 +717,12 @@ struct neigh_node *batadv_find_router(struct bat_priv *bat_priv, if (!orig_node) return NULL; - router = batadv_orig_node_get_router(orig_node); + router = orig_node_get_router(orig_node); if (!router) goto err; /* without bonding, the first node should - * always choose the default router. - */ + * always choose the default router. */ bonding_enabled = atomic_read(&bat_priv->bonding); rcu_read_lock(); @@ -744,14 +735,13 @@ struct neigh_node *batadv_find_router(struct bat_priv *bat_priv, goto return_router; /* if we have something in the primary_addr, we can search - * for a potential bonding candidate. - */ + * for a potential bonding candidate. */ if (compare_eth(router_orig->primary_addr, zero_mac)) goto return_router; /* find the orig_node which has the primary interface. might - * even be the same as our router_orig in many cases - */ + * even be the same as our router_orig in many cases */ + if (compare_eth(router_orig->primary_addr, router_orig->orig)) { primary_orig_node = router_orig; } else { @@ -760,20 +750,19 @@ struct neigh_node *batadv_find_router(struct bat_priv *bat_priv, if (!primary_orig_node) goto return_router; - batadv_orig_node_free_ref(primary_orig_node); + orig_node_free_ref(primary_orig_node); } /* with less than 2 candidates, we can't do any - * bonding and prefer the original router. - */ + * bonding and prefer the original router. */ if (atomic_read(&primary_orig_node->bond_candidates) < 2) goto return_router; /* all nodes between should choose a candidate which * is is not on the interface where the packet came - * in. - */ - batadv_neigh_node_free_ref(router); + * in. */ + + neigh_node_free_ref(router); if (bonding_enabled) router = find_bond_router(primary_orig_node, recv_if); @@ -790,7 +779,7 @@ struct neigh_node *batadv_find_router(struct bat_priv *bat_priv, rcu_read_unlock(); err: if (router) - batadv_neigh_node_free_ref(router); + neigh_node_free_ref(router); return NULL; } @@ -813,7 +802,7 @@ static int check_unicast_packet(struct sk_buff *skb, int hdr_size) return -1; /* not for me */ - if (!batadv_is_my_mac(ethhdr->h_dest)) + if (!is_my_mac(ethhdr->h_dest)) return -1; return 0; @@ -845,7 +834,7 @@ static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) goto out; /* find_router() increases neigh_nodes refcount if found. 
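As the comment notes, find_router() hands back its result with the neighbour's reference count already raised, so every successful lookup must be paired with a neigh_node_free_ref() once the caller is done, which is exactly what route_unicast_packet() does on its out: path. A tiny userspace sketch of that get/use/put contract, with an invented refcounted object standing in for struct neigh_node:

    #include <stdio.h>
    #include <stdlib.h>

    /* invented stand-in for a refcounted neighbour object */
    struct node {
            int refcount;
            const char *addr;
    };

    static struct node *node_get(struct node *n)
    {
            if (!n || n->refcount <= 0)
                    return NULL;    /* mimics atomic_inc_not_zero() failing */
            n->refcount++;
            return n;
    }

    static void node_put(struct node *n)
    {
            if (n && --n->refcount == 0)
                    free(n);
    }

    /* lookup helpers return an extra reference; callers must drop it */
    static struct node *find_router(struct node *table)
    {
            return node_get(table);
    }

    int main(void)
    {
            struct node *table = calloc(1, sizeof(*table));
            struct node *router;

            if (!table)
                    return 1;

            table->refcount = 1;
            table->addr = "02:00:00:00:00:01";

            router = find_router(table);
            if (router) {
                    printf("routing via %s\n", router->addr);
                    node_put(router);       /* drop the lookup reference */
            }

            node_put(table);                /* drop the original reference */
            return 0;
    }
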
*/ - neigh_node = batadv_find_router(bat_priv, orig_node, recv_if); + neigh_node = find_router(bat_priv, orig_node, recv_if); if (!neigh_node) goto out; @@ -859,16 +848,15 @@ static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) if (unicast_packet->header.packet_type == BAT_UNICAST && atomic_read(&bat_priv->fragmentation) && skb->len > neigh_node->if_incoming->net_dev->mtu) { - ret = batadv_frag_send_skb(skb, bat_priv, - neigh_node->if_incoming, - neigh_node->addr); + ret = frag_send_skb(skb, bat_priv, + neigh_node->if_incoming, neigh_node->addr); goto out; } if (unicast_packet->header.packet_type == BAT_UNICAST_FRAG && frag_can_reassemble(skb, neigh_node->if_incoming->net_dev->mtu)) { - ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb); + ret = frag_reassemble_skb(skb, bat_priv, &new_skb); if (ret == NET_RX_DROP) goto out; @@ -892,14 +880,14 @@ static int route_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) skb->len + ETH_HLEN); /* route it */ - batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); + send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); ret = NET_RX_SUCCESS; out: if (neigh_node) - batadv_neigh_node_free_ref(neigh_node); + neigh_node_free_ref(neigh_node); if (orig_node) - batadv_orig_node_free_ref(orig_node); + orig_node_free_ref(orig_node); return ret; } @@ -918,7 +906,7 @@ static int check_unicast_ttvn(struct bat_priv *bat_priv, unicast_packet = (struct unicast_packet *)skb->data; - if (batadv_is_my_mac(unicast_packet->dest)) { + if (is_my_mac(unicast_packet->dest)) { tt_poss_change = bat_priv->tt_poss_change; curr_ttvn = (uint8_t)atomic_read(&bat_priv->ttvn); } else { @@ -929,7 +917,7 @@ static int check_unicast_ttvn(struct bat_priv *bat_priv, curr_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); tt_poss_change = orig_node->tt_poss_change; - batadv_orig_node_free_ref(orig_node); + orig_node_free_ref(orig_node); } /* Check whether I have to reroute the packet */ @@ -945,15 +933,13 @@ static int check_unicast_ttvn(struct bat_priv *bat_priv, /* we don't have an updated route for this client, so we should * not try to reroute the packet!! 
*/ - if (batadv_tt_global_client_is_roaming(bat_priv, - ethhdr->h_dest)) + if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest)) return 1; - orig_node = batadv_transtable_search(bat_priv, NULL, - ethhdr->h_dest); + orig_node = transtable_search(bat_priv, NULL, ethhdr->h_dest); if (!orig_node) { - if (!batadv_is_my_client(bat_priv, ethhdr->h_dest)) + if (!is_my_client(bat_priv, ethhdr->h_dest)) return 0; primary_if = primary_if_get_selected(bat_priv); if (!primary_if) @@ -966,7 +952,7 @@ static int check_unicast_ttvn(struct bat_priv *bat_priv, ETH_ALEN); curr_ttvn = (uint8_t) atomic_read(&orig_node->last_ttvn); - batadv_orig_node_free_ref(orig_node); + orig_node_free_ref(orig_node); } bat_dbg(DBG_ROUTES, bat_priv, @@ -979,7 +965,7 @@ static int check_unicast_ttvn(struct bat_priv *bat_priv, return 1; } -int batadv_recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) +int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) { struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct unicast_packet *unicast_packet; @@ -994,17 +980,15 @@ int batadv_recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if) unicast_packet = (struct unicast_packet *)skb->data; /* packet for me */ - if (batadv_is_my_mac(unicast_packet->dest)) { - batadv_interface_rx(recv_if->soft_iface, skb, recv_if, - hdr_size); + if (is_my_mac(unicast_packet->dest)) { + interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); return NET_RX_SUCCESS; } return route_unicast_packet(skb, recv_if); } -int batadv_recv_ucast_frag_packet(struct sk_buff *skb, - struct hard_iface *recv_if) +int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if) { struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct unicast_frag_packet *unicast_packet; @@ -1021,9 +1005,9 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb, unicast_packet = (struct unicast_frag_packet *)skb->data; /* packet for me */ - if (batadv_is_my_mac(unicast_packet->dest)) { + if (is_my_mac(unicast_packet->dest)) { - ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb); + ret = frag_reassemble_skb(skb, bat_priv, &new_skb); if (ret == NET_RX_DROP) return NET_RX_DROP; @@ -1032,8 +1016,8 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb, if (!new_skb) return NET_RX_SUCCESS; - batadv_interface_rx(recv_if->soft_iface, new_skb, recv_if, - sizeof(struct unicast_packet)); + interface_rx(recv_if->soft_iface, new_skb, recv_if, + sizeof(struct unicast_packet)); return NET_RX_SUCCESS; } @@ -1041,7 +1025,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb, } -int batadv_recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if) +int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if) { struct bat_priv *bat_priv = netdev_priv(recv_if->soft_iface); struct orig_node *orig_node = NULL; @@ -1066,13 +1050,13 @@ int batadv_recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if) goto out; /* ignore broadcasts sent by myself */ - if (batadv_is_my_mac(ethhdr->h_source)) + if (is_my_mac(ethhdr->h_source)) goto out; bcast_packet = (struct bcast_packet *)skb->data; /* ignore broadcasts originated by myself */ - if (batadv_is_my_mac(bcast_packet->orig)) + if (is_my_mac(bcast_packet->orig)) goto out; if (bcast_packet->header.ttl < 2) @@ -1093,33 +1077,32 @@ int batadv_recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if) seq_diff = ntohl(bcast_packet->seqno) - orig_node->last_bcast_seqno; /* check whether the packet is old and the host just 
restarted. */ - if (batadv_window_protected(bat_priv, seq_diff, - &orig_node->bcast_seqno_reset)) + if (window_protected(bat_priv, seq_diff, + &orig_node->bcast_seqno_reset)) goto spin_unlock; /* mark broadcast in flood history, update window position - * if required. - */ - if (batadv_bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1)) + * if required. */ + if (bit_get_packet(bat_priv, orig_node->bcast_bits, seq_diff, 1)) orig_node->last_bcast_seqno = ntohl(bcast_packet->seqno); spin_unlock_bh(&orig_node->bcast_seqno_lock); /* check whether this has been sent by another originator before */ - if (batadv_bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size)) + if (bla_check_bcast_duplist(bat_priv, bcast_packet, hdr_size)) goto out; /* rebroadcast packet */ - batadv_add_bcast_packet_to_list(bat_priv, skb, 1); + add_bcast_packet_to_list(bat_priv, skb, 1); /* don't hand the broadcast up if it is from an originator * from the same backbone. */ - if (batadv_bla_is_backbone_gw(skb, orig_node, hdr_size)) + if (bla_is_backbone_gw(skb, orig_node, hdr_size)) goto out; /* broadcast for me */ - batadv_interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); + interface_rx(recv_if->soft_iface, skb, recv_if, hdr_size); ret = NET_RX_SUCCESS; goto out; @@ -1127,11 +1110,11 @@ int batadv_recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if) spin_unlock_bh(&orig_node->bcast_seqno_lock); out: if (orig_node) - batadv_orig_node_free_ref(orig_node); + orig_node_free_ref(orig_node); return ret; } -int batadv_recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if) +int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if) { struct vis_packet *vis_packet; struct ethhdr *ethhdr; @@ -1149,25 +1132,25 @@ int batadv_recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if) ethhdr = (struct ethhdr *)skb_mac_header(skb); /* not for me */ - if (!batadv_is_my_mac(ethhdr->h_dest)) + if (!is_my_mac(ethhdr->h_dest)) return NET_RX_DROP; /* ignore own packets */ - if (batadv_is_my_mac(vis_packet->vis_orig)) + if (is_my_mac(vis_packet->vis_orig)) return NET_RX_DROP; - if (batadv_is_my_mac(vis_packet->sender_orig)) + if (is_my_mac(vis_packet->sender_orig)) return NET_RX_DROP; switch (vis_packet->vis_type) { case VIS_TYPE_SERVER_SYNC: - batadv_receive_server_sync_packet(bat_priv, vis_packet, - skb_headlen(skb)); + receive_server_sync_packet(bat_priv, vis_packet, + skb_headlen(skb)); break; case VIS_TYPE_CLIENT_UPDATE: - batadv_receive_client_update_packet(bat_priv, vis_packet, - skb_headlen(skb)); + receive_client_update_packet(bat_priv, vis_packet, + skb_headlen(skb)); break; default: /* ignore unknown packet */ @@ -1175,7 +1158,6 @@ int batadv_recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if) } /* We take a copy of the data in the packet, so we should - * always free the skbuf. - */ + always free the skbuf. */ return NET_RX_DROP; } diff --git a/trunk/net/batman-adv/routing.h b/trunk/net/batman-adv/routing.h index c3fd219e8e53..d6bbbebb6567 100644 --- a/trunk/net/batman-adv/routing.h +++ b/trunk/net/batman-adv/routing.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. 
contributors: * * Marek Lindner, Simon Wunderlich * @@ -15,37 +16,36 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #ifndef _NET_BATMAN_ADV_ROUTING_H_ #define _NET_BATMAN_ADV_ROUTING_H_ -void batadv_slide_own_bcast_window(struct hard_iface *hard_iface); -bool batadv_check_management_packet(struct sk_buff *skb, - struct hard_iface *hard_iface, - int header_len); -void batadv_update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, - struct neigh_node *neigh_node); -int batadv_recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if); -int batadv_recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); -int batadv_recv_ucast_frag_packet(struct sk_buff *skb, - struct hard_iface *recv_if); -int batadv_recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if); -int batadv_recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if); -int batadv_recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if); -int batadv_recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if); -struct neigh_node *batadv_find_router(struct bat_priv *bat_priv, - struct orig_node *orig_node, - const struct hard_iface *recv_if); -void batadv_bonding_candidate_del(struct orig_node *orig_node, - struct neigh_node *neigh_node); -void batadv_bonding_candidate_add(struct orig_node *orig_node, - struct neigh_node *neigh_node); -void batadv_bonding_save_primary(const struct orig_node *orig_node, - struct orig_node *orig_neigh_node, - const struct batman_ogm_packet - *batman_ogm_packet); -int batadv_window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff, - unsigned long *last_reset); +void slide_own_bcast_window(struct hard_iface *hard_iface); +bool check_management_packet(struct sk_buff *skb, + struct hard_iface *hard_iface, + int header_len); +void update_route(struct bat_priv *bat_priv, struct orig_node *orig_node, + struct neigh_node *neigh_node); +int recv_icmp_packet(struct sk_buff *skb, struct hard_iface *recv_if); +int recv_unicast_packet(struct sk_buff *skb, struct hard_iface *recv_if); +int recv_ucast_frag_packet(struct sk_buff *skb, struct hard_iface *recv_if); +int recv_bcast_packet(struct sk_buff *skb, struct hard_iface *recv_if); +int recv_vis_packet(struct sk_buff *skb, struct hard_iface *recv_if); +int recv_tt_query(struct sk_buff *skb, struct hard_iface *recv_if); +int recv_roam_adv(struct sk_buff *skb, struct hard_iface *recv_if); +struct neigh_node *find_router(struct bat_priv *bat_priv, + struct orig_node *orig_node, + const struct hard_iface *recv_if); +void bonding_candidate_del(struct orig_node *orig_node, + struct neigh_node *neigh_node); +void bonding_candidate_add(struct orig_node *orig_node, + struct neigh_node *neigh_node); +void bonding_save_primary(const struct orig_node *orig_node, + struct orig_node *orig_neigh_node, + const struct batman_ogm_packet *batman_ogm_packet); +int window_protected(struct bat_priv *bat_priv, int32_t seq_num_diff, + unsigned long *last_reset); #endif /* _NET_BATMAN_ADV_ROUTING_H_ */ diff --git a/trunk/net/batman-adv/send.c b/trunk/net/batman-adv/send.c index 2c92a32ec6c6..79f8973810c0 100644 --- a/trunk/net/batman-adv/send.c +++ b/trunk/net/batman-adv/send.c @@ -1,4 +1,5 @@ -/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. 
contributors: * * Marek Lindner, Simon Wunderlich * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #include "main.h" @@ -30,10 +32,9 @@ static void send_outstanding_bcast_packet(struct work_struct *work); /* send out an already prepared packet to the given address via the - * specified batman interface - */ -int batadv_send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface, - const uint8_t *dst_addr) + * specified batman interface */ +int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface, + const uint8_t *dst_addr) { struct ethhdr *ethhdr; @@ -50,7 +51,7 @@ int batadv_send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface, } /* push to the ethernet header. */ - if (batadv_skb_head_push(skb, ETH_HLEN) < 0) + if (my_skb_head_push(skb, ETH_HLEN) < 0) goto send_skb_err; skb_reset_mac_header(skb); @@ -68,15 +69,15 @@ int batadv_send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface, /* dev_queue_xmit() returns a negative result on error. However on * congestion and traffic shaping, it drops and returns NET_XMIT_DROP - * (which is > 0). This will not be treated as an error. - */ + * (which is > 0). This will not be treated as an error. */ + return dev_queue_xmit(skb); send_skb_err: kfree_skb(skb); return NET_XMIT_DROP; } -void batadv_schedule_bat_ogm(struct hard_iface *hard_iface) +void schedule_bat_ogm(struct hard_iface *hard_iface) { struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); @@ -84,7 +85,8 @@ void batadv_schedule_bat_ogm(struct hard_iface *hard_iface) (hard_iface->if_status == IF_TO_BE_REMOVED)) return; - /* the interface gets activated here to avoid race conditions between + /** + * the interface gets activated here to avoid race conditions between * the moment of activating the interface in * hardif_activate_interface() where the originator mac is set and * outdated packets (especially uninitialized mac addresses) in the @@ -119,7 +121,7 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv, /* start timer for this packet */ INIT_DELAYED_WORK(&forw_packet->delayed_work, send_outstanding_bcast_packet); - queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work, + queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work, send_time); } @@ -130,11 +132,9 @@ static void _add_bcast_packet_to_list(struct bat_priv *bat_priv, * errors. * * The skb is not consumed, so the caller should make sure that the - * skb is freed. - */ -int batadv_add_bcast_packet_to_list(struct bat_priv *bat_priv, - const struct sk_buff *skb, - unsigned long delay) + * skb is freed. 
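add_bcast_packet_to_list(), defined just below, stores its own copy of the skb in the broadcast forwarding list, which is why interface_tx() in the soft-interface.c hunk further down calls kfree_skb() on its own skb right after queueing it. A userspace sketch of that copy-on-enqueue ownership rule, with an invented queue type standing in for the forwarding list:

    #include <stdlib.h>
    #include <string.h>

    /* invented stand-in for the broadcast forwarding queue */
    struct bcast_queue {
            unsigned char *pkt;
            size_t len;
    };

    /* the queue keeps its own copy; the caller still owns 'data' */
    static int add_bcast_packet(struct bcast_queue *q,
                                const unsigned char *data, size_t len)
    {
            unsigned char *copy = malloc(len);

            if (!copy)
                    return -1;

            memcpy(copy, data, len);
            q->pkt = copy;
            q->len = len;
            return 0;
    }

    int main(void)
    {
            struct bcast_queue q = { 0 };
            unsigned char *frame = malloc(64);

            if (!frame)
                    return 1;

            memset(frame, 0xff, 64);
            add_bcast_packet(&q, frame, 64);
            free(frame);    /* caller frees its buffer, the copy lives on */
            free(q.pkt);
            return 0;
    }
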
*/ +int add_bcast_packet_to_list(struct bat_priv *bat_priv, + const struct sk_buff *skb, unsigned long delay) { struct hard_iface *primary_if = NULL; struct forw_packet *forw_packet; @@ -204,15 +204,14 @@ static void send_outstanding_bcast_packet(struct work_struct *work) /* rebroadcast packet */ rcu_read_lock(); - list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) { + list_for_each_entry_rcu(hard_iface, &hardif_list, list) { if (hard_iface->soft_iface != soft_iface) continue; /* send a copy of the saved skb */ skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC); if (skb1) - batadv_send_skb_packet(skb1, hard_iface, - batadv_broadcast_addr); + send_skb_packet(skb1, hard_iface, broadcast_addr); } rcu_read_unlock(); @@ -230,7 +229,7 @@ static void send_outstanding_bcast_packet(struct work_struct *work) atomic_inc(&bat_priv->bcast_queue_left); } -void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work) +void send_outstanding_bat_ogm_packet(struct work_struct *work) { struct delayed_work *delayed_work = container_of(work, struct delayed_work, work); @@ -248,12 +247,13 @@ void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work) bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet); - /* we have to have at least one packet in the queue + /** + * we have to have at least one packet in the queue * to determine the queues wake up time unless we are * shutting down */ if (forw_packet->own) - batadv_schedule_bat_ogm(forw_packet->if_incoming); + schedule_bat_ogm(forw_packet->if_incoming); out: /* don't count own packet */ @@ -263,8 +263,8 @@ void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work) forw_packet_free(forw_packet); } -void batadv_purge_outstanding_packets(struct bat_priv *bat_priv, - const struct hard_iface *hard_iface) +void purge_outstanding_packets(struct bat_priv *bat_priv, + const struct hard_iface *hard_iface) { struct forw_packet *forw_packet; struct hlist_node *tmp_node, *safe_tmp_node; @@ -283,7 +283,8 @@ void batadv_purge_outstanding_packets(struct bat_priv *bat_priv, hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node, &bat_priv->forw_bcast_list, list) { - /* if purge_outstanding_packets() was called with an argument + /** + * if purge_outstanding_packets() was called with an argument * we delete only packets belonging to the given interface */ if ((hard_iface) && @@ -292,7 +293,8 @@ void batadv_purge_outstanding_packets(struct bat_priv *bat_priv, spin_unlock_bh(&bat_priv->forw_bcast_list_lock); - /* send_outstanding_bcast_packet() will lock the list to + /** + * send_outstanding_bcast_packet() will lock the list to * delete the item from the list */ pending = cancel_delayed_work_sync(&forw_packet->delayed_work); @@ -310,7 +312,8 @@ void batadv_purge_outstanding_packets(struct bat_priv *bat_priv, hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node, &bat_priv->forw_bat_list, list) { - /* if purge_outstanding_packets() was called with an argument + /** + * if purge_outstanding_packets() was called with an argument * we delete only packets belonging to the given interface */ if ((hard_iface) && @@ -319,7 +322,8 @@ void batadv_purge_outstanding_packets(struct bat_priv *bat_priv, spin_unlock_bh(&bat_priv->forw_bat_list_lock); - /* send_outstanding_bat_packet() will lock the list to + /** + * send_outstanding_bat_packet() will lock the list to * delete the item from the list */ pending = cancel_delayed_work_sync(&forw_packet->delayed_work); diff --git a/trunk/net/batman-adv/send.h b/trunk/net/batman-adv/send.h index 
e3ac75ba432b..824ef06f9b01 100644 --- a/trunk/net/batman-adv/send.h +++ b/trunk/net/batman-adv/send.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * @@ -15,19 +16,19 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #ifndef _NET_BATMAN_ADV_SEND_H_ #define _NET_BATMAN_ADV_SEND_H_ -int batadv_send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface, - const uint8_t *dst_addr); -void batadv_schedule_bat_ogm(struct hard_iface *hard_iface); -int batadv_add_bcast_packet_to_list(struct bat_priv *bat_priv, - const struct sk_buff *skb, - unsigned long delay); -void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work); -void batadv_purge_outstanding_packets(struct bat_priv *bat_priv, - const struct hard_iface *hard_iface); +int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface, + const uint8_t *dst_addr); +void schedule_bat_ogm(struct hard_iface *hard_iface); +int add_bcast_packet_to_list(struct bat_priv *bat_priv, + const struct sk_buff *skb, unsigned long delay); +void send_outstanding_bat_ogm_packet(struct work_struct *work); +void purge_outstanding_packets(struct bat_priv *bat_priv, + const struct hard_iface *hard_iface); #endif /* _NET_BATMAN_ADV_SEND_H_ */ diff --git a/trunk/net/batman-adv/soft-interface.c b/trunk/net/batman-adv/soft-interface.c index 0658781febde..304a7ba09e03 100644 --- a/trunk/net/batman-adv/soft-interface.c +++ b/trunk/net/batman-adv/soft-interface.c @@ -1,4 +1,5 @@ -/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #include "main.h" @@ -59,11 +61,12 @@ static const struct ethtool_ops bat_ethtool_ops = { .get_sset_count = batadv_get_sset_count, }; -int batadv_skb_head_push(struct sk_buff *skb, unsigned int len) +int my_skb_head_push(struct sk_buff *skb, unsigned int len) { int result; - /* TODO: We must check if we can release all references to non-payload + /** + * TODO: We must check if we can release all references to non-payload * data using skb_header_release in our skbs to allow skb_cow_header to * work optimally. 
This means that those skbs are not allowed to read * or write any data which is before the current position of skb->data @@ -106,9 +109,9 @@ static int interface_set_mac_addr(struct net_device *dev, void *p) /* only modify transtable if it has been initialized before */ if (atomic_read(&bat_priv->mesh_state) == MESH_ACTIVE) { - batadv_tt_local_remove(bat_priv, dev->dev_addr, - "mac address changed", false); - batadv_tt_local_add(dev, addr->sa_data, NULL_IFINDEX); + tt_local_remove(bat_priv, dev->dev_addr, + "mac address changed", false); + tt_local_add(dev, addr->sa_data, NULL_IFINDEX); } memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); @@ -119,7 +122,7 @@ static int interface_set_mac_addr(struct net_device *dev, void *p) static int interface_change_mtu(struct net_device *dev, int new_mtu) { /* check ranges */ - if ((new_mtu < 68) || (new_mtu > batadv_hardif_min_mtu(dev))) + if ((new_mtu < 68) || (new_mtu > hardif_min_mtu(dev))) return -EINVAL; dev->mtu = new_mtu; @@ -159,11 +162,11 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) goto dropped; } - if (batadv_bla_tx(bat_priv, skb, vid)) + if (bla_tx(bat_priv, skb, vid)) goto dropped; /* Register the client MAC in the transtable */ - batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif); + tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif); /* don't accept stp packets. STP does not help in meshes. * better use the bridge loop avoidance ... @@ -177,17 +180,15 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) switch (atomic_read(&bat_priv->gw_mode)) { case GW_MODE_SERVER: /* gateway servers should not send dhcp - * requests into the mesh - */ - ret = batadv_gw_is_dhcp_target(skb, &header_len); + * requests into the mesh */ + ret = gw_is_dhcp_target(skb, &header_len); if (ret) goto dropped; break; case GW_MODE_CLIENT: /* gateway clients should send dhcp requests - * via unicast to their gateway - */ - ret = batadv_gw_is_dhcp_target(skb, &header_len); + * via unicast to their gateway */ + ret = gw_is_dhcp_target(skb, &header_len); if (ret) do_bcast = false; break; @@ -203,7 +204,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) if (!primary_if) goto dropped; - if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0) + if (my_skb_head_push(skb, sizeof(*bcast_packet)) < 0) goto dropped; bcast_packet = (struct bcast_packet *)skb->data; @@ -214,8 +215,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) bcast_packet->header.packet_type = BAT_BCAST; /* hw address of first interface is the orig mac because only - * this mac is known throughout the mesh - */ + * this mac is known throughout the mesh */ memcpy(bcast_packet->orig, primary_if->net_dev->dev_addr, ETH_ALEN); @@ -223,22 +223,21 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) bcast_packet->seqno = htonl(atomic_inc_return(&bat_priv->bcast_seqno)); - batadv_add_bcast_packet_to_list(bat_priv, skb, 1); + add_bcast_packet_to_list(bat_priv, skb, 1); /* a copy is stored in the bcast list, therefore removing - * the original skb. - */ + * the original skb. 
*/ kfree_skb(skb); /* unicast packet */ } else { if (atomic_read(&bat_priv->gw_mode) != GW_MODE_OFF) { - ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr); + ret = gw_out_of_range(bat_priv, skb, ethhdr); if (ret) goto dropped; } - ret = batadv_unicast_send_skb(skb, bat_priv); + ret = unicast_send_skb(skb, bat_priv); if (ret != 0) goto dropped_freed; } @@ -257,9 +256,9 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) return NETDEV_TX_OK; } -void batadv_interface_rx(struct net_device *soft_iface, - struct sk_buff *skb, struct hard_iface *recv_if, - int hdr_size) +void interface_rx(struct net_device *soft_iface, + struct sk_buff *skb, struct hard_iface *recv_if, + int hdr_size) { struct bat_priv *bat_priv = netdev_priv(soft_iface); struct ethhdr *ethhdr; @@ -295,23 +294,22 @@ void batadv_interface_rx(struct net_device *soft_iface, /* should not be necessary anymore as we use skb_pull_rcsum() * TODO: please verify this and remove this TODO - * -- Dec 21st 2009, Simon Wunderlich - */ + * -- Dec 21st 2009, Simon Wunderlich */ - /* skb->ip_summed = CHECKSUM_UNNECESSARY; */ +/* skb->ip_summed = CHECKSUM_UNNECESSARY;*/ bat_priv->stats.rx_packets++; bat_priv->stats.rx_bytes += skb->len + ETH_HLEN; soft_iface->last_rx = jiffies; - if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest)) + if (is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest)) goto dropped; /* Let the bridge loop avoidance check the packet. If will * not handle it, we can safely push it up. */ - if (batadv_bla_rx(bat_priv, skb, vid)) + if (bla_rx(bat_priv, skb, vid)) goto out; netif_rx(skb); @@ -343,7 +341,8 @@ static void interface_setup(struct net_device *dev) dev->destructor = free_netdev; dev->tx_queue_len = 0; - /* can't call min_mtu, because the needed variables + /** + * can't call min_mtu, because the needed variables * have not been initialized yet */ dev->mtu = ETH_DATA_LEN; @@ -358,7 +357,7 @@ static void interface_setup(struct net_device *dev) memset(priv, 0, sizeof(*priv)); } -struct net_device *batadv_softif_create(const char *name) +struct net_device *softif_create(const char *name) { struct net_device *soft_iface; struct bat_priv *bat_priv; @@ -412,28 +411,28 @@ struct net_device *batadv_softif_create(const char *name) if (!bat_priv->bat_counters) goto unreg_soft_iface; - ret = batadv_algo_select(bat_priv, batadv_routing_algo); + ret = bat_algo_select(bat_priv, bat_routing_algo); if (ret < 0) goto free_bat_counters; - ret = batadv_sysfs_add_meshif(soft_iface); + ret = sysfs_add_meshif(soft_iface); if (ret < 0) goto free_bat_counters; - ret = batadv_debugfs_add_meshif(soft_iface); + ret = debugfs_add_meshif(soft_iface); if (ret < 0) goto unreg_sysfs; - ret = batadv_mesh_init(soft_iface); + ret = mesh_init(soft_iface); if (ret < 0) goto unreg_debugfs; return soft_iface; unreg_debugfs: - batadv_debugfs_del_meshif(soft_iface); + debugfs_del_meshif(soft_iface); unreg_sysfs: - batadv_sysfs_del_meshif(soft_iface); + sysfs_del_meshif(soft_iface); free_bat_counters: free_percpu(bat_priv->bat_counters); unreg_soft_iface: @@ -446,15 +445,15 @@ struct net_device *batadv_softif_create(const char *name) return NULL; } -void batadv_softif_destroy(struct net_device *soft_iface) +void softif_destroy(struct net_device *soft_iface) { - batadv_debugfs_del_meshif(soft_iface); - batadv_sysfs_del_meshif(soft_iface); - batadv_mesh_free(soft_iface); + debugfs_del_meshif(soft_iface); + sysfs_del_meshif(soft_iface); + mesh_free(soft_iface); unregister_netdevice(soft_iface); } -int 
batadv_softif_is_valid(const struct net_device *net_dev) +int softif_is_valid(const struct net_device *net_dev) { if (net_dev->netdev_ops->ndo_start_xmit == interface_tx) return 1; diff --git a/trunk/net/batman-adv/soft-interface.h b/trunk/net/batman-adv/soft-interface.h index 2711ba5b1233..020300673884 100644 --- a/trunk/net/batman-adv/soft-interface.h +++ b/trunk/net/batman-adv/soft-interface.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: * * Marek Lindner * @@ -15,16 +16,18 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #ifndef _NET_BATMAN_ADV_SOFT_INTERFACE_H_ #define _NET_BATMAN_ADV_SOFT_INTERFACE_H_ -int batadv_skb_head_push(struct sk_buff *skb, unsigned int len); -void batadv_interface_rx(struct net_device *soft_iface, struct sk_buff *skb, - struct hard_iface *recv_if, int hdr_size); -struct net_device *batadv_softif_create(const char *name); -void batadv_softif_destroy(struct net_device *soft_iface); -int batadv_softif_is_valid(const struct net_device *net_dev); +int my_skb_head_push(struct sk_buff *skb, unsigned int len); +void interface_rx(struct net_device *soft_iface, + struct sk_buff *skb, struct hard_iface *recv_if, + int hdr_size); +struct net_device *softif_create(const char *name); +void softif_destroy(struct net_device *soft_iface); +int softif_is_valid(const struct net_device *net_dev); #endif /* _NET_BATMAN_ADV_SOFT_INTERFACE_H_ */ diff --git a/trunk/net/batman-adv/translation-table.c b/trunk/net/batman-adv/translation-table.c index 5180d50e909d..a1a51cc9d88e 100644 --- a/trunk/net/batman-adv/translation-table.c +++ b/trunk/net/batman-adv/translation-table.c @@ -1,4 +1,5 @@ -/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. 
contributors: * * Marek Lindner, Simon Wunderlich, Antonio Quartulli * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #include "main.h" @@ -46,7 +48,7 @@ static int compare_tt(const struct hlist_node *node, const void *data2) static void tt_start_timer(struct bat_priv *bat_priv) { INIT_DELAYED_WORK(&bat_priv->tt_work, tt_purge); - queue_delayed_work(batadv_event_workqueue, &bat_priv->tt_work, + queue_delayed_work(bat_event_workqueue, &bat_priv->tt_work, msecs_to_jiffies(5000)); } @@ -140,7 +142,7 @@ static void tt_orig_list_entry_free_rcu(struct rcu_head *rcu) orig_entry = container_of(rcu, struct tt_orig_list_entry, rcu); atomic_dec(&orig_entry->orig_node->tt_size); - batadv_orig_node_free_ref(orig_entry->orig_node); + orig_node_free_ref(orig_entry->orig_node); kfree(orig_entry); } @@ -171,7 +173,7 @@ static void tt_local_event(struct bat_priv *bat_priv, const uint8_t *addr, atomic_set(&bat_priv->tt_ogm_append_cnt, 0); } -int batadv_tt_len(int changes_num) +int tt_len(int changes_num) { return changes_num * sizeof(struct tt_change); } @@ -181,7 +183,7 @@ static int tt_local_init(struct bat_priv *bat_priv) if (bat_priv->tt_local_hash) return 0; - bat_priv->tt_local_hash = batadv_hash_new(1024); + bat_priv->tt_local_hash = hash_new(1024); if (!bat_priv->tt_local_hash) return -ENOMEM; @@ -189,8 +191,8 @@ static int tt_local_init(struct bat_priv *bat_priv) return 0; } -void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, - int ifindex) +void tt_local_add(struct net_device *soft_iface, const uint8_t *addr, + int ifindex) { struct bat_priv *bat_priv = netdev_priv(soft_iface); struct tt_local_entry *tt_local_entry = NULL; @@ -219,7 +221,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, memcpy(tt_local_entry->common.addr, addr, ETH_ALEN); tt_local_entry->common.flags = NO_FLAGS; - if (batadv_is_wifi_iface(ifindex)) + if (is_wifi_iface(ifindex)) tt_local_entry->common.flags |= TT_CLIENT_WIFI; atomic_set(&tt_local_entry->common.refcount, 2); tt_local_entry->last_seen = jiffies; @@ -230,8 +232,7 @@ void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, /* The local entry has to be marked as NEW to avoid to send it in * a full table response going out before the next ttvn increment - * (consistency check) - */ + * (consistency check) */ tt_local_entry->common.flags |= TT_CLIENT_NEW; hash_added = hash_add(bat_priv->tt_local_hash, compare_tt, choose_orig, @@ -301,7 +302,7 @@ static void tt_prepare_packet_buff(struct bat_priv *bat_priv, primary_if = primary_if_get_selected(bat_priv); req_len = min_packet_len; - req_len += batadv_tt_len(atomic_read(&bat_priv->tt_local_changes)); + req_len += tt_len(atomic_read(&bat_priv->tt_local_changes)); /* if we have too many changes for one packet don't send any * and wait for the tt table request which will be fragmented @@ -331,7 +332,7 @@ static int tt_changes_fill_buff(struct bat_priv *bat_priv, tt_buff = *packet_buff + min_packet_len; if (new_len > 0) - tot_changes = new_len / batadv_tt_len(1); + tot_changes = new_len / tt_len(1); spin_lock_bh(&bat_priv->tt_changes_list_lock); atomic_set(&bat_priv->tt_local_changes, 0); @@ -339,7 +340,7 @@ static int tt_changes_fill_buff(struct bat_priv *bat_priv, list_for_each_entry_safe(entry, safe, &bat_priv->tt_changes_list, list) { if (count < tot_changes) { - memcpy(tt_buff + batadv_tt_len(count), + memcpy(tt_buff + 
tt_len(count), &entry->change, sizeof(struct tt_change)); count++; } @@ -369,7 +370,7 @@ static int tt_changes_fill_buff(struct bat_priv *bat_priv, return count; } -int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset) +int tt_local_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; struct bat_priv *bat_priv = netdev_priv(net_dev); @@ -436,8 +437,7 @@ static void tt_local_set_pending(struct bat_priv *bat_priv, /* The local client has to be marked as "pending to be removed" but has * to be kept in the table in order to send it in a full table - * response issued before the net ttvn increment (consistency check) - */ + * response issued before the net ttvn increment (consistency check) */ tt_local_entry->common.flags |= TT_CLIENT_PENDING; bat_dbg(DBG_TT, bat_priv, @@ -445,8 +445,8 @@ static void tt_local_set_pending(struct bat_priv *bat_priv, tt_local_entry->common.addr, message); } -void batadv_tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr, - const char *message, bool roaming) +void tt_local_remove(struct bat_priv *bat_priv, const uint8_t *addr, + const char *message, bool roaming) { struct tt_local_entry *tt_local_entry = NULL; @@ -531,7 +531,7 @@ static void tt_local_table_free(struct bat_priv *bat_priv) spin_unlock_bh(list_lock); } - batadv_hash_destroy(hash); + hash_destroy(hash); bat_priv->tt_local_hash = NULL; } @@ -541,7 +541,7 @@ static int tt_global_init(struct bat_priv *bat_priv) if (bat_priv->tt_global_hash) return 0; - bat_priv->tt_global_hash = batadv_hash_new(1024); + bat_priv->tt_global_hash = hash_new(1024); if (!bat_priv->tt_global_hash) return -ENOMEM; @@ -611,9 +611,9 @@ static void tt_global_add_orig_entry(struct tt_global_entry *tt_global_entry, } /* caller must hold orig_node refcount */ -int batadv_tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, - const unsigned char *tt_addr, uint8_t ttvn, - bool roaming, bool wifi) +int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, + const unsigned char *tt_addr, uint8_t ttvn, bool roaming, + bool wifi) { struct tt_global_entry *tt_global_entry = NULL; int ret = 0; @@ -677,8 +677,8 @@ int batadv_tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, out_remove: /* remove address from local hash if present */ - batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr, - "global tt received", roaming); + tt_local_remove(bat_priv, tt_global_entry->common.addr, + "global tt received", roaming); ret = 1; out: if (tt_global_entry) @@ -714,7 +714,7 @@ static void tt_global_print_entry(struct tt_global_entry *tt_global_entry, } } -int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset) +int tt_global_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; struct bat_priv *bat_priv = netdev_priv(net_dev); @@ -898,8 +898,8 @@ static void tt_global_del(struct bat_priv *bat_priv, * If there are other originators left, we directly delete * the originator. * 2) the client roamed to us => we can directly delete - * the global entry, since it is useless now. - */ + * the global entry, since it is useless now. 
*/ + tt_local_entry = tt_local_hash_find(bat_priv, tt_global_entry->common.addr); if (tt_local_entry) { @@ -919,8 +919,8 @@ static void tt_global_del(struct bat_priv *bat_priv, tt_local_entry_free_ref(tt_local_entry); } -void batadv_tt_global_del_orig(struct bat_priv *bat_priv, - struct orig_node *orig_node, const char *message) +void tt_global_del_orig(struct bat_priv *bat_priv, + struct orig_node *orig_node, const char *message) { struct tt_global_entry *tt_global_entry; struct tt_common_entry *tt_common_entry; @@ -1031,7 +1031,7 @@ static void tt_global_table_free(struct bat_priv *bat_priv) spin_unlock_bh(list_lock); } - batadv_hash_destroy(hash); + hash_destroy(hash); bat_priv->tt_global_hash = NULL; } @@ -1048,9 +1048,8 @@ static bool _is_ap_isolated(struct tt_local_entry *tt_local_entry, return ret; } -struct orig_node *batadv_transtable_search(struct bat_priv *bat_priv, - const uint8_t *src, - const uint8_t *addr) +struct orig_node *transtable_search(struct bat_priv *bat_priv, + const uint8_t *src, const uint8_t *addr) { struct tt_local_entry *tt_local_entry = NULL; struct tt_global_entry *tt_global_entry = NULL; @@ -1072,8 +1071,7 @@ struct orig_node *batadv_transtable_search(struct bat_priv *bat_priv, goto out; /* check whether the clients should not communicate due to AP - * isolation - */ + * isolation */ if (tt_local_entry && _is_ap_isolated(tt_local_entry, tt_global_entry)) goto out; @@ -1082,7 +1080,7 @@ struct orig_node *batadv_transtable_search(struct bat_priv *bat_priv, rcu_read_lock(); head = &tt_global_entry->orig_list; hlist_for_each_entry_rcu(orig_entry, node, head, list) { - router = batadv_orig_node_get_router(orig_entry->orig_node); + router = orig_node_get_router(orig_entry->orig_node); if (!router) continue; @@ -1090,7 +1088,7 @@ struct orig_node *batadv_transtable_search(struct bat_priv *bat_priv, orig_node = orig_entry->orig_node; best_tq = router->tq_avg; } - batadv_neigh_node_free_ref(router); + neigh_node_free_ref(router); } /* found anything? 
*/ if (orig_node && !atomic_inc_not_zero(&orig_node->refcount)) @@ -1172,8 +1170,7 @@ static uint16_t batadv_tt_local_crc(struct bat_priv *bat_priv) hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) { /* not yet committed clients have not to be taken into - * account while computing the CRC - */ + * account while computing the CRC */ if (tt_common_entry->flags & TT_CLIENT_NEW) continue; total_one = 0; @@ -1207,11 +1204,10 @@ static void tt_save_orig_buffer(struct bat_priv *bat_priv, const unsigned char *tt_buff, uint8_t tt_num_changes) { - uint16_t tt_buff_len = batadv_tt_len(tt_num_changes); + uint16_t tt_buff_len = tt_len(tt_num_changes); /* Replace the old buffer only if I received something in the - * last OGM (the OGM could carry no changes) - */ + * last OGM (the OGM could carry no changes) */ spin_lock_bh(&orig_node->tt_buff_lock); if (tt_buff_len > 0) { kfree(orig_node->tt_buff); @@ -1240,8 +1236,7 @@ static void tt_req_purge(struct bat_priv *bat_priv) } /* returns the pointer to the new tt_req_node struct if no request - * has already been issued for this orig_node, NULL otherwise - */ + * has already been issued for this orig_node, NULL otherwise */ static struct tt_req_node *new_tt_req_node(struct bat_priv *bat_priv, struct orig_node *orig_node) { @@ -1351,8 +1346,7 @@ static struct sk_buff *tt_response_fill_table(uint16_t tt_len, uint8_t ttvn, rcu_read_unlock(); /* store in the message the number of entries we have successfully - * copied - */ + * copied */ tt_response->tt_data = htons(tt_count); out: @@ -1375,8 +1369,7 @@ static int send_tt_request(struct bat_priv *bat_priv, goto out; /* The new tt_req will be issued only if I'm not waiting for a - * reply from the same orig_node yet - */ + * reply from the same orig_node yet */ tt_req_node = new_tt_req_node(bat_priv, dst_orig_node); if (!tt_req_node) goto out; @@ -1402,7 +1395,7 @@ static int send_tt_request(struct bat_priv *bat_priv, if (full_table) tt_request->flags |= TT_FULL_TABLE; - neigh_node = batadv_orig_node_get_router(dst_orig_node); + neigh_node = orig_node_get_router(dst_orig_node); if (!neigh_node) goto out; @@ -1413,12 +1406,12 @@ static int send_tt_request(struct bat_priv *bat_priv, batadv_inc_counter(bat_priv, BAT_CNT_TT_REQUEST_TX); - batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); + send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); ret = 0; out: if (neigh_node) - batadv_neigh_node_free_ref(neigh_node); + neigh_node_free_ref(neigh_node); if (primary_if) hardif_free_ref(primary_if); if (ret) @@ -1460,7 +1453,7 @@ static bool send_other_tt_response(struct bat_priv *bat_priv, if (!res_dst_orig_node) goto out; - neigh_node = batadv_orig_node_get_router(res_dst_orig_node); + neigh_node = orig_node_get_router(res_dst_orig_node); if (!neigh_node) goto out; @@ -1484,8 +1477,7 @@ static bool send_other_tt_response(struct bat_priv *bat_priv, full_table = false; /* In this version, fragmentation is not implemented, then - * I'll send only one packet with as much TT entries as I can - */ + * I'll send only one packet with as much TT entries as I can */ if (!full_table) { spin_lock_bh(&req_dst_orig_node->tt_buff_lock); tt_len = req_dst_orig_node->tt_buff_len; @@ -1540,7 +1532,7 @@ static bool send_other_tt_response(struct bat_priv *bat_priv, batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_TX); - batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); + send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); ret = true; goto out; @@ -1549,11 
+1541,11 @@ static bool send_other_tt_response(struct bat_priv *bat_priv, out: if (res_dst_orig_node) - batadv_orig_node_free_ref(res_dst_orig_node); + orig_node_free_ref(res_dst_orig_node); if (req_dst_orig_node) - batadv_orig_node_free_ref(req_dst_orig_node); + orig_node_free_ref(req_dst_orig_node); if (neigh_node) - batadv_neigh_node_free_ref(neigh_node); + neigh_node_free_ref(neigh_node); if (primary_if) hardif_free_ref(primary_if); if (!ret) @@ -1588,7 +1580,7 @@ static bool send_my_tt_response(struct bat_priv *bat_priv, if (!orig_node) goto out; - neigh_node = batadv_orig_node_get_router(orig_node); + neigh_node = orig_node_get_router(orig_node); if (!neigh_node) goto out; @@ -1597,8 +1589,7 @@ static bool send_my_tt_response(struct bat_priv *bat_priv, goto out; /* If the full table has been explicitly requested or the gap - * is too big send the whole local translation table - */ + * is too big send the whole local translation table */ if (tt_request->flags & TT_FULL_TABLE || my_ttvn != req_ttvn || !bat_priv->tt_buff) full_table = true; @@ -1606,8 +1597,7 @@ static bool send_my_tt_response(struct bat_priv *bat_priv, full_table = false; /* In this version, fragmentation is not implemented, then - * I'll send only one packet with as much TT entries as I can - */ + * I'll send only one packet with as much TT entries as I can */ if (!full_table) { spin_lock_bh(&bat_priv->tt_buff_lock); tt_len = bat_priv->tt_buff_len; @@ -1660,7 +1650,7 @@ static bool send_my_tt_response(struct bat_priv *bat_priv, batadv_inc_counter(bat_priv, BAT_CNT_TT_RESPONSE_TX); - batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); + send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); ret = true; goto out; @@ -1668,9 +1658,9 @@ static bool send_my_tt_response(struct bat_priv *bat_priv, spin_unlock_bh(&bat_priv->tt_buff_lock); out: if (orig_node) - batadv_orig_node_free_ref(orig_node); + orig_node_free_ref(orig_node); if (neigh_node) - batadv_neigh_node_free_ref(neigh_node); + neigh_node_free_ref(neigh_node); if (primary_if) hardif_free_ref(primary_if); if (!ret) @@ -1679,12 +1669,12 @@ static bool send_my_tt_response(struct bat_priv *bat_priv, return true; } -bool batadv_send_tt_response(struct bat_priv *bat_priv, - struct tt_query_packet *tt_request) +bool send_tt_response(struct bat_priv *bat_priv, + struct tt_query_packet *tt_request) { - if (batadv_is_my_mac(tt_request->dst)) { + if (is_my_mac(tt_request->dst)) { /* don't answer backbone gws! 
*/ - if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src)) + if (bla_is_backbone_gw_orig(bat_priv, tt_request->src)) return true; return send_my_tt_response(bat_priv, tt_request); @@ -1699,19 +1689,18 @@ static void _tt_update_changes(struct bat_priv *bat_priv, uint16_t tt_num_changes, uint8_t ttvn) { int i; - int is_wifi; for (i = 0; i < tt_num_changes; i++) { - if ((tt_change + i)->flags & TT_CLIENT_DEL) { + if ((tt_change + i)->flags & TT_CLIENT_DEL) tt_global_del(bat_priv, orig_node, (tt_change + i)->addr, "tt removed by changes", (tt_change + i)->flags & TT_CLIENT_ROAM); - } else { - is_wifi = (tt_change + i)->flags & TT_CLIENT_WIFI; - if (!batadv_tt_global_add(bat_priv, orig_node, - (tt_change + i)->addr, ttvn, - false, is_wifi)) + else + if (!tt_global_add(bat_priv, orig_node, + (tt_change + i)->addr, ttvn, false, + (tt_change + i)->flags & + TT_CLIENT_WIFI)) /* In case of problem while storing a * global_entry, we stop the updating * procedure without committing the @@ -1719,7 +1708,6 @@ static void _tt_update_changes(struct bat_priv *bat_priv, * corrupted data on tt_request */ return; - } } orig_node->tt_initialised = true; } @@ -1734,7 +1722,7 @@ static void tt_fill_gtable(struct bat_priv *bat_priv, goto out; /* Purge the old table first.. */ - batadv_tt_global_del_orig(bat_priv, orig_node, "Received full table"); + tt_global_del_orig(bat_priv, orig_node, "Received full table"); _tt_update_changes(bat_priv, orig_node, (struct tt_change *)(tt_response + 1), @@ -1750,7 +1738,7 @@ static void tt_fill_gtable(struct bat_priv *bat_priv, out: if (orig_node) - batadv_orig_node_free_ref(orig_node); + orig_node_free_ref(orig_node); } static void tt_update_changes(struct bat_priv *bat_priv, @@ -1766,7 +1754,7 @@ static void tt_update_changes(struct bat_priv *bat_priv, atomic_set(&orig_node->last_ttvn, ttvn); } -bool batadv_is_my_client(struct bat_priv *bat_priv, const uint8_t *addr) +bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr) { struct tt_local_entry *tt_local_entry = NULL; bool ret = false; @@ -1775,8 +1763,7 @@ bool batadv_is_my_client(struct bat_priv *bat_priv, const uint8_t *addr) if (!tt_local_entry) goto out; /* Check if the client has been logically deleted (but is kept for - * consistency purpose) - */ + * consistency purpose) */ if (tt_local_entry->common.flags & TT_CLIENT_PENDING) goto out; ret = true; @@ -1786,8 +1773,8 @@ bool batadv_is_my_client(struct bat_priv *bat_priv, const uint8_t *addr) return ret; } -void batadv_handle_tt_response(struct bat_priv *bat_priv, - struct tt_query_packet *tt_response) +void handle_tt_response(struct bat_priv *bat_priv, + struct tt_query_packet *tt_response) { struct tt_req_node *node, *safe; struct orig_node *orig_node = NULL; @@ -1799,7 +1786,7 @@ void batadv_handle_tt_response(struct bat_priv *bat_priv, (tt_response->flags & TT_FULL_TABLE ? 'F' : '.')); /* we should have never asked a backbone gw */ - if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src)) + if (bla_is_backbone_gw_orig(bat_priv, tt_response->src)) goto out; orig_node = orig_hash_find(bat_priv, tt_response->src); @@ -1827,15 +1814,14 @@ void batadv_handle_tt_response(struct bat_priv *bat_priv, /* Recalculate the CRC for this orig_node and store it */ orig_node->tt_crc = tt_global_crc(bat_priv, orig_node); /* Roaming phase is over: tables are in sync again. 
I can - * unset the flag - */ + * unset the flag */ orig_node->tt_poss_change = false; out: if (orig_node) - batadv_orig_node_free_ref(orig_node); + orig_node_free_ref(orig_node); } -int batadv_tt_init(struct bat_priv *bat_priv) +int tt_init(struct bat_priv *bat_priv) { int ret; @@ -1885,8 +1871,7 @@ static void tt_roam_purge(struct bat_priv *bat_priv) * maximum number of possible roaming phases. In this case the ROAMING_ADV * will not be sent. * - * returns true if the ROAMING_ADV can be sent, false otherwise - */ + * returns true if the ROAMING_ADV can be sent, false otherwise */ static bool tt_check_roam_count(struct bat_priv *bat_priv, uint8_t *client) { @@ -1895,8 +1880,7 @@ static bool tt_check_roam_count(struct bat_priv *bat_priv, spin_lock_bh(&bat_priv->tt_roam_list_lock); /* The new tt_req will be issued only if I'm not waiting for a - * reply from the same orig_node yet - */ + * reply from the same orig_node yet */ list_for_each_entry(tt_roam_node, &bat_priv->tt_roam_list, list) { if (!compare_eth(tt_roam_node->addr, client)) continue; @@ -1939,8 +1923,7 @@ static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, struct hard_iface *primary_if; /* before going on we have to check whether the client has - * already roamed to us too many times - */ + * already roamed to us too many times */ if (!tt_check_roam_count(bat_priv, client)) goto out; @@ -1964,7 +1947,7 @@ static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN); memcpy(roam_adv_packet->client, client, ETH_ALEN); - neigh_node = batadv_orig_node_get_router(orig_node); + neigh_node = orig_node_get_router(orig_node); if (!neigh_node) goto out; @@ -1974,12 +1957,12 @@ static void send_roam_adv(struct bat_priv *bat_priv, uint8_t *client, batadv_inc_counter(bat_priv, BAT_CNT_TT_ROAM_ADV_TX); - batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); + send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); ret = 0; out: if (neigh_node) - batadv_neigh_node_free_ref(neigh_node); + neigh_node_free_ref(neigh_node); if (ret) kfree_skb(skb); return; @@ -2000,7 +1983,7 @@ static void tt_purge(struct work_struct *work) tt_start_timer(bat_priv); } -void batadv_tt_free(struct bat_priv *bat_priv) +void tt_free(struct bat_priv *bat_priv) { cancel_delayed_work_sync(&bat_priv->tt_work); @@ -2014,8 +1997,7 @@ void batadv_tt_free(struct bat_priv *bat_priv) } /* This function will enable or disable the specified flags for all the entries - * in the given hash table and returns the number of modified entries - */ + * in the given hash table and returns the number of modified entries */ static uint16_t tt_set_flags(struct hashtable_t *hash, uint16_t flags, bool enable) { @@ -2143,8 +2125,7 @@ int batadv_tt_append_diff(struct bat_priv *bat_priv, return tt_num_changes; } -bool batadv_is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, - uint8_t *dst) +bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst) { struct tt_local_entry *tt_local_entry = NULL; struct tt_global_entry *tt_global_entry = NULL; @@ -2174,27 +2155,24 @@ bool batadv_is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, return ret; } -void batadv_tt_update_orig(struct bat_priv *bat_priv, - struct orig_node *orig_node, - const unsigned char *tt_buff, uint8_t tt_num_changes, - uint8_t ttvn, uint16_t tt_crc) +void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, + const unsigned char *tt_buff, uint8_t tt_num_changes, + uint8_t ttvn, 
uint16_t tt_crc) { uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn); bool full_table = true; /* don't care about a backbone gateways updates. */ - if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig)) + if (bla_is_backbone_gw_orig(bat_priv, orig_node->orig)) return; /* orig table not initialised AND first diff is in the OGM OR the ttvn - * increased by one -> we can apply the attached changes - */ + * increased by one -> we can apply the attached changes */ if ((!orig_node->tt_initialised && ttvn == 1) || ttvn - orig_ttvn == 1) { /* the OGM could not contain the changes due to their size or * because they have already been sent TT_OGM_APPEND_MAX times. - * In this case send a tt request - */ + * In this case send a tt request */ if (!tt_num_changes) { full_table = false; goto request_table; @@ -2205,8 +2183,7 @@ void batadv_tt_update_orig(struct bat_priv *bat_priv, /* Even if we received the precomputed crc with the OGM, we * prefer to recompute it to spot any possible inconsistency - * in the global table - */ + * in the global table */ orig_node->tt_crc = tt_global_crc(bat_priv, orig_node); /* The ttvn alone is not enough to guarantee consistency @@ -2216,19 +2193,17 @@ void batadv_tt_update_orig(struct bat_priv *bat_priv, * consistent or not. E.g. a node could disconnect while its * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case * checking the CRC value is mandatory to detect the - * inconsistency - */ + * inconsistency */ if (orig_node->tt_crc != tt_crc) goto request_table; /* Roaming phase is over: tables are in sync again. I can - * unset the flag - */ + * unset the flag */ orig_node->tt_poss_change = false; } else { /* if we missed more than one change or our tables are not - * in sync anymore -> request fresh tt data - */ + * in sync anymore -> request fresh tt data */ + if (!orig_node->tt_initialised || ttvn != orig_ttvn || orig_node->tt_crc != tt_crc) { request_table: @@ -2247,8 +2222,7 @@ void batadv_tt_update_orig(struct bat_priv *bat_priv, * originator to another one. This entry is kept is still kept for consistency * purposes */ -bool batadv_tt_global_client_is_roaming(struct bat_priv *bat_priv, - uint8_t *addr) +bool tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr) { struct tt_global_entry *tt_global_entry; bool ret = false; diff --git a/trunk/net/batman-adv/translation-table.h b/trunk/net/batman-adv/translation-table.h index 7edc9dff8ba1..d6ea30f9b026 100644 --- a/trunk/net/batman-adv/translation-table.h +++ b/trunk/net/batman-adv/translation-table.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. 
contributors: * * Marek Lindner, Simon Wunderlich, Antonio Quartulli * @@ -15,49 +16,43 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #ifndef _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ #define _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ -int batadv_tt_len(int changes_num); -int batadv_tt_init(struct bat_priv *bat_priv); -void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr, - int ifindex); -void batadv_tt_local_remove(struct bat_priv *bat_priv, - const uint8_t *addr, const char *message, - bool roaming); -int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset); -void batadv_tt_global_add_orig(struct bat_priv *bat_priv, - struct orig_node *orig_node, - const unsigned char *tt_buff, int tt_buff_len); -int batadv_tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, - const unsigned char *addr, uint8_t ttvn, bool roaming, - bool wifi); -int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset); -void batadv_tt_global_del_orig(struct bat_priv *bat_priv, - struct orig_node *orig_node, - const char *message); -struct orig_node *batadv_transtable_search(struct bat_priv *bat_priv, - const uint8_t *src, - const uint8_t *addr); -void batadv_tt_free(struct bat_priv *bat_priv); -bool batadv_send_tt_response(struct bat_priv *bat_priv, - struct tt_query_packet *tt_request); -bool batadv_is_my_client(struct bat_priv *bat_priv, const uint8_t *addr); -void batadv_handle_tt_response(struct bat_priv *bat_priv, - struct tt_query_packet *tt_response); -bool batadv_is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, - uint8_t *dst); -void batadv_tt_update_orig(struct bat_priv *bat_priv, - struct orig_node *orig_node, - const unsigned char *tt_buff, uint8_t tt_num_changes, - uint8_t ttvn, uint16_t tt_crc); +int tt_len(int changes_num); +int tt_init(struct bat_priv *bat_priv); +void tt_local_add(struct net_device *soft_iface, const uint8_t *addr, + int ifindex); +void tt_local_remove(struct bat_priv *bat_priv, + const uint8_t *addr, const char *message, bool roaming); +int tt_local_seq_print_text(struct seq_file *seq, void *offset); +void tt_global_add_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, + const unsigned char *tt_buff, int tt_buff_len); +int tt_global_add(struct bat_priv *bat_priv, struct orig_node *orig_node, + const unsigned char *addr, uint8_t ttvn, bool roaming, + bool wifi); +int tt_global_seq_print_text(struct seq_file *seq, void *offset); +void tt_global_del_orig(struct bat_priv *bat_priv, + struct orig_node *orig_node, const char *message); +struct orig_node *transtable_search(struct bat_priv *bat_priv, + const uint8_t *src, const uint8_t *addr); +void tt_free(struct bat_priv *bat_priv); +bool send_tt_response(struct bat_priv *bat_priv, + struct tt_query_packet *tt_request); +bool is_my_client(struct bat_priv *bat_priv, const uint8_t *addr); +void handle_tt_response(struct bat_priv *bat_priv, + struct tt_query_packet *tt_response); +bool is_ap_isolated(struct bat_priv *bat_priv, uint8_t *src, uint8_t *dst); +void tt_update_orig(struct bat_priv *bat_priv, struct orig_node *orig_node, + const unsigned char *tt_buff, uint8_t tt_num_changes, + uint8_t ttvn, uint16_t tt_crc); int batadv_tt_append_diff(struct bat_priv *bat_priv, unsigned char **packet_buff, int *packet_buff_len, int packet_min_len); -bool batadv_tt_global_client_is_roaming(struct bat_priv *bat_priv, - uint8_t *addr); +bool 
tt_global_client_is_roaming(struct bat_priv *bat_priv, uint8_t *addr); #endif /* _NET_BATMAN_ADV_TRANSLATION_TABLE_H_ */ diff --git a/trunk/net/batman-adv/types.h b/trunk/net/batman-adv/types.h index 053c5d4776ce..bf71d525445a 100644 --- a/trunk/net/batman-adv/types.h +++ b/trunk/net/batman-adv/types.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * @@ -15,8 +16,11 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ + + #ifndef _NET_BATMAN_ADV_TYPES_H_ #define _NET_BATMAN_ADV_TYPES_H_ @@ -45,7 +49,8 @@ struct hard_iface { struct rcu_head rcu; }; -/* orig_node - structure for orig_list maintaining nodes of mesh +/** + * orig_node - structure for orig_list maintaining nodes of mesh * @primary_addr: hosts primary interface address * @last_seen: when last packet from this node was received * @bcast_seqno_reset: time when the broadcast seqno window was reset @@ -81,8 +86,7 @@ struct orig_node { * If true, then I sent a Roaming_adv to this orig_node and I have to * inspect every packet directed to it to check whether it is still * the true destination or not. This flag will be reset to false as - * soon as I receive a new TTVN from this orig_node - */ + * soon as I receive a new TTVN from this orig_node */ bool tt_poss_change; uint32_t last_real_seqno; uint8_t last_ttl; @@ -97,8 +101,7 @@ struct orig_node { struct bat_priv *bat_priv; unsigned long last_frag_packet; /* ogm_cnt_lock protects: bcast_own, bcast_own_sum, - * neigh_node->real_bits, neigh_node->real_packet_count - */ + * neigh_node->real_bits, neigh_node->real_packet_count */ spinlock_t ogm_cnt_lock; /* bcast_seqno_lock protects bcast_bits, last_bcast_seqno */ spinlock_t bcast_seqno_lock; @@ -115,7 +118,8 @@ struct gw_node { struct rcu_head rcu; }; -/* neigh_node +/** + * neigh_node * @last_seen: when last packet via this neighbor was received */ struct neigh_node { @@ -187,8 +191,7 @@ struct bat_priv { * If true, then I received a Roaming_adv and I have to inspect every * packet directed to me to check whether I am still the true * destination or not. This flag will be reset to false as soon as I - * increase my TTVN - */ + * increase my TTVN */ bool tt_poss_change; char num_ifaces; struct debug_log *debug_log; @@ -323,7 +326,8 @@ struct tt_roam_node { struct list_head list; }; -/* forw_packet - structure for forw_list maintaining packets to be +/** + * forw_packet - structure for forw_list maintaining packets to be * send/forwarded */ struct forw_packet { @@ -365,8 +369,7 @@ struct frag_packet_list_entry { struct vis_info { unsigned long first_seen; /* list of server-neighbors we received a vis-packet - * from. we should not reply to them. - */ + * from. we should not reply to them. */ struct list_head recv_list; struct list_head send_list; struct kref refcount; @@ -374,7 +377,7 @@ struct vis_info { struct bat_priv *bat_priv; /* this packet might be part of the vis send queue. */ struct sk_buff *skb_packet; - /* vis_info may follow here */ + /* vis_info may follow here*/ } __packed; struct vis_info_entry { diff --git a/trunk/net/batman-adv/unicast.c b/trunk/net/batman-adv/unicast.c index b2b76df69607..74175c210858 100644 --- a/trunk/net/batman-adv/unicast.c +++ b/trunk/net/batman-adv/unicast.c @@ -1,4 +1,5 @@ -/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2010-2012 B.A.T.M.A.N. 
contributors: * * Andreas Langer * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #include "main.h" @@ -99,7 +101,7 @@ static int frag_create_buffer(struct list_head *head) for (i = 0; i < FRAG_BUFFER_SIZE; i++) { tfp = kmalloc(sizeof(*tfp), GFP_ATOMIC); if (!tfp) { - batadv_frag_list_free(head); + frag_list_free(head); return -ENOMEM; } tfp->skb = NULL; @@ -149,7 +151,7 @@ static struct frag_packet_list_entry *frag_search_packet(struct list_head *head, return NULL; } -void batadv_frag_list_free(struct list_head *head) +void frag_list_free(struct list_head *head) { struct frag_packet_list_entry *pf, *tmp_pf; @@ -170,8 +172,8 @@ void batadv_frag_list_free(struct list_head *head) * or the skb could be reassembled (skb_new will point to the new packet and * skb was freed) */ -int batadv_frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv, - struct sk_buff **new_skb) +int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv, + struct sk_buff **new_skb) { struct orig_node *orig_node; struct frag_packet_list_entry *tmp_frag_entry; @@ -210,12 +212,12 @@ int batadv_frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv, out: if (orig_node) - batadv_orig_node_free_ref(orig_node); + orig_node_free_ref(orig_node); return ret; } -int batadv_frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, - struct hard_iface *hard_iface, const uint8_t dstaddr[]) +int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, + struct hard_iface *hard_iface, const uint8_t dstaddr[]) { struct unicast_packet tmp_uc, *unicast_packet; struct hard_iface *primary_if; @@ -240,8 +242,8 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, memcpy(&tmp_uc, unicast_packet, uc_hdr_len); skb_split(skb, frag_skb, data_len / 2 + uc_hdr_len); - if (batadv_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 || - batadv_skb_head_push(frag_skb, ucf_hdr_len) < 0) + if (my_skb_head_push(skb, ucf_hdr_len - uc_hdr_len) < 0 || + my_skb_head_push(frag_skb, ucf_hdr_len) < 0) goto drop_frag; frag1 = (struct unicast_frag_packet *)skb->data; @@ -266,8 +268,8 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, frag1->seqno = htons(seqno - 1); frag2->seqno = htons(seqno); - batadv_send_skb_packet(skb, hard_iface, dstaddr); - batadv_send_skb_packet(frag_skb, hard_iface, dstaddr); + send_skb_packet(skb, hard_iface, dstaddr); + send_skb_packet(frag_skb, hard_iface, dstaddr); ret = NET_RX_SUCCESS; goto out; @@ -281,7 +283,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, return ret; } -int batadv_unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv) +int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv) { struct ethhdr *ethhdr = (struct ethhdr *)skb->data; struct unicast_packet *unicast_packet; @@ -292,26 +294,28 @@ int batadv_unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv) /* get routing information */ if (is_multicast_ether_addr(ethhdr->h_dest)) { - orig_node = batadv_gw_get_selected_orig(bat_priv); + orig_node = gw_get_selected_orig(bat_priv); if (orig_node) goto find_router; } /* check for tt host - increases orig_node refcount. 
- * returns NULL in case of AP isolation - */ - orig_node = batadv_transtable_search(bat_priv, ethhdr->h_source, - ethhdr->h_dest); + * returns NULL in case of AP isolation */ + orig_node = transtable_search(bat_priv, ethhdr->h_source, + ethhdr->h_dest); + find_router: - /* find_router(): + /** + * find_router(): * - if orig_node is NULL it returns NULL * - increases neigh_nodes refcount if found. */ - neigh_node = batadv_find_router(bat_priv, orig_node, NULL); + neigh_node = find_router(bat_priv, orig_node, NULL); + if (!neigh_node) goto out; - if (batadv_skb_head_push(skb, sizeof(*unicast_packet)) < 0) + if (my_skb_head_push(skb, sizeof(*unicast_packet)) < 0) goto out; unicast_packet = (struct unicast_packet *)skb->data; @@ -332,7 +336,7 @@ int batadv_unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv) * try to reroute it because the ttvn contained in the header is less * than the current one */ - if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest)) + if (tt_global_client_is_roaming(bat_priv, ethhdr->h_dest)) unicast_packet->ttvn = unicast_packet->ttvn - 1; if (atomic_read(&bat_priv->fragmentation) && @@ -340,21 +344,20 @@ int batadv_unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv) neigh_node->if_incoming->net_dev->mtu) { /* send frag skb decreases ttl */ unicast_packet->header.ttl++; - ret = batadv_frag_send_skb(skb, bat_priv, - neigh_node->if_incoming, - neigh_node->addr); + ret = frag_send_skb(skb, bat_priv, + neigh_node->if_incoming, neigh_node->addr); goto out; } - batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); + send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr); ret = 0; goto out; out: if (neigh_node) - batadv_neigh_node_free_ref(neigh_node); + neigh_node_free_ref(neigh_node); if (orig_node) - batadv_orig_node_free_ref(orig_node); + orig_node_free_ref(orig_node); if (ret == 1) kfree_skb(skb); return ret; diff --git a/trunk/net/batman-adv/unicast.h b/trunk/net/batman-adv/unicast.h index 87f8f89d1440..a9faf6b1db19 100644 --- a/trunk/net/batman-adv/unicast.h +++ b/trunk/net/batman-adv/unicast.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2010-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2010-2012 B.A.T.M.A.N. 
contributors: * * Andreas Langer * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #ifndef _NET_BATMAN_ADV_UNICAST_H_ @@ -25,13 +27,12 @@ #define FRAG_TIMEOUT 10000 /* purge frag list entries after time in ms */ #define FRAG_BUFFER_SIZE 6 /* number of list elements in buffer */ -int batadv_frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv, - struct sk_buff **new_skb); -void batadv_frag_list_free(struct list_head *head); -int batadv_unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv); -int batadv_frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, - struct hard_iface *hard_iface, - const uint8_t dstaddr[]); +int frag_reassemble_skb(struct sk_buff *skb, struct bat_priv *bat_priv, + struct sk_buff **new_skb); +void frag_list_free(struct list_head *head); +int unicast_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv); +int frag_send_skb(struct sk_buff *skb, struct bat_priv *bat_priv, + struct hard_iface *hard_iface, const uint8_t dstaddr[]); static inline int frag_can_reassemble(const struct sk_buff *skb, int mtu) { diff --git a/trunk/net/batman-adv/vis.c b/trunk/net/batman-adv/vis.c index 20eef04645bd..01d5da54143e 100644 --- a/trunk/net/batman-adv/vis.c +++ b/trunk/net/batman-adv/vis.c @@ -1,4 +1,5 @@ -/* Copyright (C) 2008-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2008-2012 B.A.T.M.A.N. contributors: * * Simon Wunderlich * @@ -15,6 +16,7 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #include "main.h" @@ -62,9 +64,8 @@ static int vis_info_cmp(const struct hlist_node *node, const void *data2) return compare_eth(p1->vis_orig, p2->vis_orig); } -/* hash function to choose an entry in a hash table of given size - * hash algorithm from http://en.wikipedia.org/wiki/Hash_table - */ +/* hash function to choose an entry in a hash table of given size */ +/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */ static uint32_t vis_info_choose(const void *data, uint32_t size) { const struct vis_info *vis_info = data; @@ -117,8 +118,7 @@ static struct vis_info *vis_hash_find(struct bat_priv *bat_priv, } /* insert interface to the list of interfaces of one originator, if it - * does not already exist in the list - */ + * does not already exist in the list */ static void vis_data_insert_interface(const uint8_t *interface, struct hlist_head *if_list, bool primary) @@ -188,7 +188,7 @@ static ssize_t vis_data_read_entry(char *buff, return 0; } -int batadv_vis_seq_print_text(struct seq_file *seq, void *offset) +int vis_seq_print_text(struct seq_file *seq, void *offset) { struct hard_iface *primary_if; struct hlist_node *node; @@ -334,8 +334,7 @@ int batadv_vis_seq_print_text(struct seq_file *seq, void *offset) } /* add the info packet to the send list, if it was not - * already linked in. - */ + * already linked in. */ static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info) { if (list_empty(&info->send_list)) { @@ -345,8 +344,7 @@ static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info) } /* delete the info packet from the send list, if it was - * linked in. - */ + * linked in. */ static void send_list_del(struct vis_info *info) { if (!list_empty(&info->send_list)) { @@ -390,8 +388,7 @@ static int recv_list_is_in(struct bat_priv *bat_priv, /* try to add the packet to the vis_hash. 
return NULL if invalid (e.g. too old, * broken.. ). vis hash must be locked outside. is_new is set when the packet - * is newer than old entries in the hash. - */ + * is newer than old entries in the hash. */ static struct vis_info *add_packet(struct bat_priv *bat_priv, struct vis_packet *vis_packet, int vis_info_len, int *is_new, @@ -465,7 +462,7 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv, /* Make it a broadcast packet, if required */ if (make_broadcast) - memcpy(packet->target_orig, batadv_broadcast_addr, ETH_ALEN); + memcpy(packet->target_orig, broadcast_addr, ETH_ALEN); /* repair if entries is longer than packet. */ if (packet->entries * sizeof(struct vis_info_entry) > vis_info_len) @@ -486,9 +483,9 @@ static struct vis_info *add_packet(struct bat_priv *bat_priv, } /* handle the server sync packet, forward if needed. */ -void batadv_receive_server_sync_packet(struct bat_priv *bat_priv, - struct vis_packet *vis_packet, - int vis_info_len) +void receive_server_sync_packet(struct bat_priv *bat_priv, + struct vis_packet *vis_packet, + int vis_info_len) { struct vis_info *info; int is_new, make_broadcast; @@ -503,8 +500,7 @@ void batadv_receive_server_sync_packet(struct bat_priv *bat_priv, goto end; /* only if we are server ourselves and packet is newer than the one in - * hash. - */ + * hash.*/ if (vis_server == VIS_TYPE_SERVER_SYNC && is_new) send_list_add(bat_priv, info); end: @@ -512,9 +508,9 @@ void batadv_receive_server_sync_packet(struct bat_priv *bat_priv, } /* handle an incoming client update packet and schedule forward if needed. */ -void batadv_receive_client_update_packet(struct bat_priv *bat_priv, - struct vis_packet *vis_packet, - int vis_info_len) +void receive_client_update_packet(struct bat_priv *bat_priv, + struct vis_packet *vis_packet, + int vis_info_len) { struct vis_info *info; struct vis_packet *packet; @@ -528,7 +524,7 @@ void batadv_receive_client_update_packet(struct bat_priv *bat_priv, /* Are we the target for this VIS packet? */ if (vis_server == VIS_TYPE_SERVER_SYNC && - batadv_is_my_mac(vis_packet->target_orig)) + is_my_mac(vis_packet->target_orig)) are_target = 1; spin_lock_bh(&bat_priv->vis_hash_lock); @@ -547,7 +543,7 @@ void batadv_receive_client_update_packet(struct bat_priv *bat_priv, send_list_add(bat_priv, info); /* ... we're not the recipient (and thus need to forward). */ - } else if (!batadv_is_my_mac(packet->target_orig)) { + } else if (!is_my_mac(packet->target_orig)) { send_list_add(bat_priv, info); } @@ -558,8 +554,7 @@ void batadv_receive_client_update_packet(struct bat_priv *bat_priv, /* Walk the originators and find the VIS server with the best tq. Set the packet * address to its address and return the best_tq. 
* - * Must be called with the originator hash locked - */ + * Must be called with the originator hash locked */ static int find_best_vis_server(struct bat_priv *bat_priv, struct vis_info *info) { @@ -579,7 +574,7 @@ static int find_best_vis_server(struct bat_priv *bat_priv, rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { - router = batadv_orig_node_get_router(orig_node); + router = orig_node_get_router(orig_node); if (!router) continue; @@ -589,7 +584,7 @@ static int find_best_vis_server(struct bat_priv *bat_priv, memcpy(packet->target_orig, orig_node->orig, ETH_ALEN); } - batadv_neigh_node_free_ref(router); + neigh_node_free_ref(router); } rcu_read_unlock(); } @@ -610,8 +605,7 @@ static bool vis_packet_full(const struct vis_info *info) } /* generates a packet of own vis data, - * returns 0 on success, -1 if no packet could be generated - */ + * returns 0 on success, -1 if no packet could be generated */ static int generate_vis_packet(struct bat_priv *bat_priv) { struct hashtable_t *hash = bat_priv->orig_hash; @@ -629,7 +623,7 @@ static int generate_vis_packet(struct bat_priv *bat_priv) info->first_seen = jiffies; packet->vis_type = atomic_read(&bat_priv->vis_mode); - memcpy(packet->target_orig, batadv_broadcast_addr, ETH_ALEN); + memcpy(packet->target_orig, broadcast_addr, ETH_ALEN); packet->header.ttl = TTL; packet->seqno = htonl(ntohl(packet->seqno) + 1); packet->entries = 0; @@ -647,7 +641,7 @@ static int generate_vis_packet(struct bat_priv *bat_priv) rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { - router = batadv_orig_node_get_router(orig_node); + router = orig_node_get_router(orig_node); if (!router) continue; @@ -671,7 +665,7 @@ static int generate_vis_packet(struct bat_priv *bat_priv) packet->entries++; next: - batadv_neigh_node_free_ref(router); + neigh_node_free_ref(router); if (vis_packet_full(info)) goto unlock; @@ -709,8 +703,7 @@ static int generate_vis_packet(struct bat_priv *bat_priv) } /* free old vis packets. Must be called with this vis_hash_lock - * held - */ + * held */ static void purge_vis_packets(struct bat_priv *bat_priv) { uint32_t i; @@ -764,16 +757,15 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv, if (!(orig_node->flags & VIS_SERVER)) continue; - router = batadv_orig_node_get_router(orig_node); + router = orig_node_get_router(orig_node); if (!router) continue; /* don't send it if we already received the packet from - * this node. - */ + * this node. 
*/ if (recv_list_is_in(bat_priv, &info->recv_list, orig_node->orig)) { - batadv_neigh_node_free_ref(router); + neigh_node_free_ref(router); continue; } @@ -781,12 +773,11 @@ static void broadcast_vis_packet(struct bat_priv *bat_priv, hard_iface = router->if_incoming; memcpy(dstaddr, router->addr, ETH_ALEN); - batadv_neigh_node_free_ref(router); + neigh_node_free_ref(router); skb = skb_clone(info->skb_packet, GFP_ATOMIC); if (skb) - batadv_send_skb_packet(skb, hard_iface, - dstaddr); + send_skb_packet(skb, hard_iface, dstaddr); } rcu_read_unlock(); @@ -807,19 +798,19 @@ static void unicast_vis_packet(struct bat_priv *bat_priv, if (!orig_node) goto out; - router = batadv_orig_node_get_router(orig_node); + router = orig_node_get_router(orig_node); if (!router) goto out; skb = skb_clone(info->skb_packet, GFP_ATOMIC); if (skb) - batadv_send_skb_packet(skb, router->if_incoming, router->addr); + send_skb_packet(skb, router->if_incoming, router->addr); out: if (router) - batadv_neigh_node_free_ref(router); + neigh_node_free_ref(router); if (orig_node) - batadv_orig_node_free_ref(orig_node); + orig_node_free_ref(orig_node); } /* only send one vis packet. called from send_vis_packets() */ @@ -887,9 +878,8 @@ static void send_vis_packets(struct work_struct *work) } /* init the vis server. this may only be called when if_list is already - * initialized (e.g. bat0 is initialized, interfaces have been added) - */ -int batadv_vis_init(struct bat_priv *bat_priv) + * initialized (e.g. bat0 is initialized, interfaces have been added) */ +int vis_init(struct bat_priv *bat_priv) { struct vis_packet *packet; int hash_added; @@ -899,7 +889,7 @@ int batadv_vis_init(struct bat_priv *bat_priv) spin_lock_bh(&bat_priv->vis_hash_lock); - bat_priv->vis_hash = batadv_hash_new(256); + bat_priv->vis_hash = hash_new(256); if (!bat_priv->vis_hash) { pr_err("Can't initialize vis_hash\n"); goto err; @@ -953,7 +943,7 @@ int batadv_vis_init(struct bat_priv *bat_priv) bat_priv->my_vis_info = NULL; err: spin_unlock_bh(&bat_priv->vis_hash_lock); - batadv_vis_quit(bat_priv); + vis_quit(bat_priv); return -ENOMEM; } @@ -968,7 +958,7 @@ static void free_info_ref(struct hlist_node *node, void *arg) } /* shutdown vis-server */ -void batadv_vis_quit(struct bat_priv *bat_priv) +void vis_quit(struct bat_priv *bat_priv) { if (!bat_priv->vis_hash) return; @@ -987,6 +977,6 @@ void batadv_vis_quit(struct bat_priv *bat_priv) static void start_vis_timer(struct bat_priv *bat_priv) { INIT_DELAYED_WORK(&bat_priv->vis_work, send_vis_packets); - queue_delayed_work(batadv_event_workqueue, &bat_priv->vis_work, + queue_delayed_work(bat_event_workqueue, &bat_priv->vis_work, msecs_to_jiffies(VIS_INTERVAL)); } diff --git a/trunk/net/batman-adv/vis.h b/trunk/net/batman-adv/vis.h index dad595870f8f..ee2e46e5347b 100644 --- a/trunk/net/batman-adv/vis.h +++ b/trunk/net/batman-adv/vis.h @@ -1,4 +1,5 @@ -/* Copyright (C) 2008-2012 B.A.T.M.A.N. contributors: +/* + * Copyright (C) 2008-2012 B.A.T.M.A.N. 
contributors: * * Simon Wunderlich, Marek Lindner * @@ -15,22 +16,23 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA + * */ #ifndef _NET_BATMAN_ADV_VIS_H_ #define _NET_BATMAN_ADV_VIS_H_ -/* timeout of vis packets in miliseconds */ -#define VIS_TIMEOUT 200000 +#define VIS_TIMEOUT 200000 /* timeout of vis packets + * in miliseconds */ -int batadv_vis_seq_print_text(struct seq_file *seq, void *offset); -void batadv_receive_server_sync_packet(struct bat_priv *bat_priv, - struct vis_packet *vis_packet, - int vis_info_len); -void batadv_receive_client_update_packet(struct bat_priv *bat_priv, - struct vis_packet *vis_packet, - int vis_info_len); -int batadv_vis_init(struct bat_priv *bat_priv); -void batadv_vis_quit(struct bat_priv *bat_priv); +int vis_seq_print_text(struct seq_file *seq, void *offset); +void receive_server_sync_packet(struct bat_priv *bat_priv, + struct vis_packet *vis_packet, + int vis_info_len); +void receive_client_update_packet(struct bat_priv *bat_priv, + struct vis_packet *vis_packet, + int vis_info_len); +int vis_init(struct bat_priv *bat_priv); +void vis_quit(struct bat_priv *bat_priv); #endif /* _NET_BATMAN_ADV_VIS_H_ */ diff --git a/trunk/net/bluetooth/Makefile b/trunk/net/bluetooth/Makefile index fa6d94a4602a..2dc5a5700f53 100644 --- a/trunk/net/bluetooth/Makefile +++ b/trunk/net/bluetooth/Makefile @@ -9,5 +9,4 @@ obj-$(CONFIG_BT_CMTP) += cmtp/ obj-$(CONFIG_BT_HIDP) += hidp/ bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ - hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \ - a2mp.o + hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o diff --git a/trunk/net/bluetooth/a2mp.c b/trunk/net/bluetooth/a2mp.c deleted file mode 100644 index fb93250b3938..000000000000 --- a/trunk/net/bluetooth/a2mp.c +++ /dev/null @@ -1,568 +0,0 @@ -/* - Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved. - Copyright (c) 2011,2012 Intel Corp. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License version 2 and - only version 2 as published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
-*/ - -#include -#include -#include -#include - -/* A2MP build & send command helper functions */ -static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data) -{ - struct a2mp_cmd *cmd; - int plen; - - plen = sizeof(*cmd) + len; - cmd = kzalloc(plen, GFP_KERNEL); - if (!cmd) - return NULL; - - cmd->code = code; - cmd->ident = ident; - cmd->len = cpu_to_le16(len); - - memcpy(cmd->data, data, len); - - return cmd; -} - -static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, - void *data) -{ - struct l2cap_chan *chan = mgr->a2mp_chan; - struct a2mp_cmd *cmd; - u16 total_len = len + sizeof(*cmd); - struct kvec iv; - struct msghdr msg; - - cmd = __a2mp_build(code, ident, len, data); - if (!cmd) - return; - - iv.iov_base = cmd; - iv.iov_len = total_len; - - memset(&msg, 0, sizeof(msg)); - - msg.msg_iov = (struct iovec *) &iv; - msg.msg_iovlen = 1; - - l2cap_chan_send(chan, &msg, total_len, 0); - - kfree(cmd); -} - -static inline void __a2mp_cl_bredr(struct a2mp_cl *cl) -{ - cl->id = 0; - cl->type = 0; - cl->status = 1; -} - -/* hci_dev_list shall be locked */ -static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl, u8 num_ctrl) -{ - int i = 0; - struct hci_dev *hdev; - - __a2mp_cl_bredr(cl); - - list_for_each_entry(hdev, &hci_dev_list, list) { - /* Iterate through AMP controllers */ - if (hdev->id == HCI_BREDR_ID) - continue; - - /* Starting from second entry */ - if (++i >= num_ctrl) - return; - - cl[i].id = hdev->id; - cl[i].type = hdev->amp_type; - cl[i].status = hdev->amp_status; - } -} - -/* Processing A2MP messages */ -static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_cmd_rej *rej = (void *) skb->data; - - if (le16_to_cpu(hdr->len) < sizeof(*rej)) - return -EINVAL; - - BT_DBG("ident %d reason %d", hdr->ident, le16_to_cpu(rej->reason)); - - skb_pull(skb, sizeof(*rej)); - - return 0; -} - -static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_discov_req *req = (void *) skb->data; - u16 len = le16_to_cpu(hdr->len); - struct a2mp_discov_rsp *rsp; - u16 ext_feat; - u8 num_ctrl; - - if (len < sizeof(*req)) - return -EINVAL; - - skb_pull(skb, sizeof(*req)); - - ext_feat = le16_to_cpu(req->ext_feat); - - BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat); - - /* check that packet is not broken for now */ - while (ext_feat & A2MP_FEAT_EXT) { - if (len < sizeof(ext_feat)) - return -EINVAL; - - ext_feat = get_unaligned_le16(skb->data); - BT_DBG("efm 0x%4.4x", ext_feat); - len -= sizeof(ext_feat); - skb_pull(skb, sizeof(ext_feat)); - } - - read_lock(&hci_dev_list_lock); - - num_ctrl = __hci_num_ctrl(); - len = num_ctrl * sizeof(struct a2mp_cl) + sizeof(*rsp); - rsp = kmalloc(len, GFP_ATOMIC); - if (!rsp) { - read_unlock(&hci_dev_list_lock); - return -ENOMEM; - } - - rsp->mtu = __constant_cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU); - rsp->ext_feat = 0; - - __a2mp_add_cl(mgr, rsp->cl, num_ctrl); - - read_unlock(&hci_dev_list_lock); - - a2mp_send(mgr, A2MP_DISCOVER_RSP, hdr->ident, len, rsp); - - kfree(rsp); - return 0; -} - -static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_cl *cl = (void *) skb->data; - - while (skb->len >= sizeof(*cl)) { - BT_DBG("Controller id %d type %d status %d", cl->id, cl->type, - cl->status); - cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl)); - } - - /* TODO send A2MP_CHANGE_RSP */ - - return 0; -} - -static int a2mp_getinfo_req(struct amp_mgr *mgr, 
struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_info_req *req = (void *) skb->data; - struct a2mp_info_rsp rsp; - struct hci_dev *hdev; - - if (le16_to_cpu(hdr->len) < sizeof(*req)) - return -EINVAL; - - BT_DBG("id %d", req->id); - - rsp.id = req->id; - rsp.status = A2MP_STATUS_INVALID_CTRL_ID; - - hdev = hci_dev_get(req->id); - if (hdev && hdev->amp_type != HCI_BREDR) { - rsp.status = 0; - rsp.total_bw = cpu_to_le32(hdev->amp_total_bw); - rsp.max_bw = cpu_to_le32(hdev->amp_max_bw); - rsp.min_latency = cpu_to_le32(hdev->amp_min_latency); - rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap); - rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size); - } - - if (hdev) - hci_dev_put(hdev); - - a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp), &rsp); - - skb_pull(skb, sizeof(*req)); - return 0; -} - -static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_amp_assoc_req *req = (void *) skb->data; - struct hci_dev *hdev; - - if (le16_to_cpu(hdr->len) < sizeof(*req)) - return -EINVAL; - - BT_DBG("id %d", req->id); - - hdev = hci_dev_get(req->id); - if (!hdev || hdev->amp_type == HCI_BREDR) { - struct a2mp_amp_assoc_rsp rsp; - rsp.id = req->id; - rsp.status = A2MP_STATUS_INVALID_CTRL_ID; - - a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp), - &rsp); - goto clean; - } - - /* Placeholder for HCI Read AMP Assoc */ - -clean: - if (hdev) - hci_dev_put(hdev); - - skb_pull(skb, sizeof(*req)); - return 0; -} - -static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_physlink_req *req = (void *) skb->data; - - struct a2mp_physlink_rsp rsp; - struct hci_dev *hdev; - - if (le16_to_cpu(hdr->len) < sizeof(*req)) - return -EINVAL; - - BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id); - - rsp.local_id = req->remote_id; - rsp.remote_id = req->local_id; - - hdev = hci_dev_get(req->remote_id); - if (!hdev || hdev->amp_type != HCI_AMP) { - rsp.status = A2MP_STATUS_INVALID_CTRL_ID; - goto send_rsp; - } - - /* TODO process physlink create */ - - rsp.status = A2MP_STATUS_SUCCESS; - -send_rsp: - if (hdev) - hci_dev_put(hdev); - - a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident, sizeof(rsp), - &rsp); - - skb_pull(skb, le16_to_cpu(hdr->len)); - return 0; -} - -static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_physlink_req *req = (void *) skb->data; - struct a2mp_physlink_rsp rsp; - struct hci_dev *hdev; - - if (le16_to_cpu(hdr->len) < sizeof(*req)) - return -EINVAL; - - BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id); - - rsp.local_id = req->remote_id; - rsp.remote_id = req->local_id; - rsp.status = A2MP_STATUS_SUCCESS; - - hdev = hci_dev_get(req->local_id); - if (!hdev) { - rsp.status = A2MP_STATUS_INVALID_CTRL_ID; - goto send_rsp; - } - - /* TODO Disconnect Phys Link here */ - - hci_dev_put(hdev); - -send_rsp: - a2mp_send(mgr, A2MP_DISCONNPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp); - - skb_pull(skb, sizeof(*req)); - return 0; -} - -static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - BT_DBG("ident %d code %d", hdr->ident, hdr->code); - - skb_pull(skb, le16_to_cpu(hdr->len)); - return 0; -} - -/* Handle A2MP signalling */ -static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) -{ - struct a2mp_cmd *hdr = (void *) skb->data; - struct amp_mgr *mgr = chan->data; - int err = 0; - - amp_mgr_get(mgr); - - while 
(skb->len >= sizeof(*hdr)) { - struct a2mp_cmd *hdr = (void *) skb->data; - u16 len = le16_to_cpu(hdr->len); - - BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, len); - - skb_pull(skb, sizeof(*hdr)); - - if (len > skb->len || !hdr->ident) { - err = -EINVAL; - break; - } - - mgr->ident = hdr->ident; - - switch (hdr->code) { - case A2MP_COMMAND_REJ: - a2mp_command_rej(mgr, skb, hdr); - break; - - case A2MP_DISCOVER_REQ: - err = a2mp_discover_req(mgr, skb, hdr); - break; - - case A2MP_CHANGE_NOTIFY: - err = a2mp_change_notify(mgr, skb, hdr); - break; - - case A2MP_GETINFO_REQ: - err = a2mp_getinfo_req(mgr, skb, hdr); - break; - - case A2MP_GETAMPASSOC_REQ: - err = a2mp_getampassoc_req(mgr, skb, hdr); - break; - - case A2MP_CREATEPHYSLINK_REQ: - err = a2mp_createphyslink_req(mgr, skb, hdr); - break; - - case A2MP_DISCONNPHYSLINK_REQ: - err = a2mp_discphyslink_req(mgr, skb, hdr); - break; - - case A2MP_CHANGE_RSP: - case A2MP_DISCOVER_RSP: - case A2MP_GETINFO_RSP: - case A2MP_GETAMPASSOC_RSP: - case A2MP_CREATEPHYSLINK_RSP: - case A2MP_DISCONNPHYSLINK_RSP: - err = a2mp_cmd_rsp(mgr, skb, hdr); - break; - - default: - BT_ERR("Unknown A2MP sig cmd 0x%2.2x", hdr->code); - err = -EINVAL; - break; - } - } - - if (err) { - struct a2mp_cmd_rej rej; - rej.reason = __constant_cpu_to_le16(0); - - BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err); - - a2mp_send(mgr, A2MP_COMMAND_REJ, hdr->ident, sizeof(rej), - &rej); - } - - /* Always free skb and return success error code to prevent - from sending L2CAP Disconnect over A2MP channel */ - kfree_skb(skb); - - amp_mgr_put(mgr); - - return 0; -} - -static void a2mp_chan_close_cb(struct l2cap_chan *chan) -{ - l2cap_chan_destroy(chan); -} - -static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state) -{ - struct amp_mgr *mgr = chan->data; - - if (!mgr) - return; - - BT_DBG("chan %p state %s", chan, state_to_string(state)); - - chan->state = state; - - switch (state) { - case BT_CLOSED: - if (mgr) - amp_mgr_put(mgr); - break; - } -} - -static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan, - unsigned long len, int nb) -{ - return bt_skb_alloc(len, GFP_KERNEL); -} - -static struct l2cap_ops a2mp_chan_ops = { - .name = "L2CAP A2MP channel", - .recv = a2mp_chan_recv_cb, - .close = a2mp_chan_close_cb, - .state_change = a2mp_chan_state_change_cb, - .alloc_skb = a2mp_chan_alloc_skb_cb, - - /* Not implemented for A2MP */ - .new_connection = l2cap_chan_no_new_connection, - .teardown = l2cap_chan_no_teardown, - .ready = l2cap_chan_no_ready, -}; - -static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn) -{ - struct l2cap_chan *chan; - int err; - - chan = l2cap_chan_create(); - if (!chan) - return NULL; - - BT_DBG("chan %p", chan); - - chan->chan_type = L2CAP_CHAN_CONN_FIX_A2MP; - chan->flush_to = L2CAP_DEFAULT_FLUSH_TO; - - chan->ops = &a2mp_chan_ops; - - l2cap_chan_set_defaults(chan); - chan->remote_max_tx = chan->max_tx; - chan->remote_tx_win = chan->tx_win; - - chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO; - chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; - - skb_queue_head_init(&chan->tx_q); - - chan->mode = L2CAP_MODE_ERTM; - - err = l2cap_ertm_init(chan); - if (err < 0) { - l2cap_chan_del(chan, 0); - return NULL; - } - - chan->conf_state = 0; - - l2cap_chan_add(conn, chan); - - chan->remote_mps = chan->omtu; - chan->mps = chan->omtu; - - chan->state = BT_CONNECTED; - - return chan; -} - -/* AMP Manager functions */ -void amp_mgr_get(struct amp_mgr *mgr) -{ - BT_DBG("mgr %p", mgr); - - 
kref_get(&mgr->kref); -} - -static void amp_mgr_destroy(struct kref *kref) -{ - struct amp_mgr *mgr = container_of(kref, struct amp_mgr, kref); - - BT_DBG("mgr %p", mgr); - - kfree(mgr); -} - -int amp_mgr_put(struct amp_mgr *mgr) -{ - BT_DBG("mgr %p", mgr); - - return kref_put(&mgr->kref, &_mgr_destroy); -} - -static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn) -{ - struct amp_mgr *mgr; - struct l2cap_chan *chan; - - mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); - if (!mgr) - return NULL; - - BT_DBG("conn %p mgr %p", conn, mgr); - - mgr->l2cap_conn = conn; - - chan = a2mp_chan_open(conn); - if (!chan) { - kfree(mgr); - return NULL; - } - - mgr->a2mp_chan = chan; - chan->data = mgr; - - conn->hcon->amp_mgr = mgr; - - kref_init(&mgr->kref); - - return mgr; -} - -struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn, - struct sk_buff *skb) -{ - struct amp_mgr *mgr; - - mgr = amp_mgr_create(conn); - if (!mgr) { - BT_ERR("Could not create AMP manager"); - return NULL; - } - - BT_DBG("mgr: %p chan %p", mgr, mgr->a2mp_chan); - - return mgr->a2mp_chan; -} diff --git a/trunk/net/bluetooth/af_bluetooth.c b/trunk/net/bluetooth/af_bluetooth.c index f7db5792ec64..3e18af4dadc4 100644 --- a/trunk/net/bluetooth/af_bluetooth.c +++ b/trunk/net/bluetooth/af_bluetooth.c @@ -25,7 +25,18 @@ /* Bluetooth address family and sockets. */ #include + +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include #include @@ -407,8 +418,7 @@ static inline unsigned int bt_accept_poll(struct sock *parent) return 0; } -unsigned int bt_sock_poll(struct file *file, struct socket *sock, - poll_table *wait) +unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; unsigned int mask = 0; diff --git a/trunk/net/bluetooth/bnep/core.c b/trunk/net/bluetooth/bnep/core.c index 4a6620bc1570..031d7d656754 100644 --- a/trunk/net/bluetooth/bnep/core.c +++ b/trunk/net/bluetooth/bnep/core.c @@ -26,9 +26,26 @@ */ #include + +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include + +#include #include + +#include #include +#include + #include #include @@ -289,7 +306,7 @@ static u8 __bnep_rx_hlen[] = { ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */ }; -static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) +static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) { struct net_device *dev = s->dev; struct sk_buff *nskb; @@ -387,7 +404,7 @@ static u8 __bnep_tx_types[] = { BNEP_COMPRESSED }; -static int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb) +static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb) { struct ethhdr *eh = (void *) skb->data; struct socket *sock = s->sock; diff --git a/trunk/net/bluetooth/bnep/netdev.c b/trunk/net/bluetooth/bnep/netdev.c index 98f86f91d47c..bc4086480d97 100644 --- a/trunk/net/bluetooth/bnep/netdev.c +++ b/trunk/net/bluetooth/bnep/netdev.c @@ -25,8 +25,16 @@ SOFTWARE IS DISCLAIMED. 
*/ -#include +#include +#include + +#include +#include #include +#include +#include + +#include #include #include @@ -120,7 +128,7 @@ static void bnep_net_timeout(struct net_device *dev) } #ifdef CONFIG_BT_BNEP_MC_FILTER -static int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) +static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) { struct ethhdr *eh = (void *) skb->data; @@ -132,7 +140,7 @@ static int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) #ifdef CONFIG_BT_BNEP_PROTO_FILTER /* Determine ether protocol. Based on eth_type_trans. */ -static u16 bnep_net_eth_proto(struct sk_buff *skb) +static inline u16 bnep_net_eth_proto(struct sk_buff *skb) { struct ethhdr *eh = (void *) skb->data; u16 proto = ntohs(eh->h_proto); @@ -146,7 +154,7 @@ static u16 bnep_net_eth_proto(struct sk_buff *skb) return ETH_P_802_2; } -static int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s) +static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s) { u16 proto = bnep_net_eth_proto(skb); struct bnep_proto_filter *f = s->proto_filter; diff --git a/trunk/net/bluetooth/bnep/sock.c b/trunk/net/bluetooth/bnep/sock.c index 5e5f5b410e0b..180bfc45810d 100644 --- a/trunk/net/bluetooth/bnep/sock.c +++ b/trunk/net/bluetooth/bnep/sock.c @@ -24,8 +24,24 @@ SOFTWARE IS DISCLAIMED. */ -#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include +#include + #include "bnep.h" diff --git a/trunk/net/bluetooth/hci_conn.c b/trunk/net/bluetooth/hci_conn.c index 2fcced377e50..3f18a6ed9731 100644 --- a/trunk/net/bluetooth/hci_conn.c +++ b/trunk/net/bluetooth/hci_conn.c @@ -24,11 +24,24 @@ /* Bluetooth HCI connection handling. 
*/ -#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include #include #include -#include static void hci_le_connect(struct hci_conn *conn) { @@ -41,15 +54,15 @@ static void hci_le_connect(struct hci_conn *conn) conn->sec_level = BT_SECURITY_LOW; memset(&cp, 0, sizeof(cp)); - cp.scan_interval = __constant_cpu_to_le16(0x0060); - cp.scan_window = __constant_cpu_to_le16(0x0030); + cp.scan_interval = cpu_to_le16(0x0060); + cp.scan_window = cpu_to_le16(0x0030); bacpy(&cp.peer_addr, &conn->dst); cp.peer_addr_type = conn->dst_type; - cp.conn_interval_min = __constant_cpu_to_le16(0x0028); - cp.conn_interval_max = __constant_cpu_to_le16(0x0038); - cp.supervision_timeout = __constant_cpu_to_le16(0x002a); - cp.min_ce_len = __constant_cpu_to_le16(0x0000); - cp.max_ce_len = __constant_cpu_to_le16(0x0000); + cp.conn_interval_min = cpu_to_le16(0x0028); + cp.conn_interval_max = cpu_to_le16(0x0038); + cp.supervision_timeout = cpu_to_le16(0x002a); + cp.min_ce_len = cpu_to_le16(0x0000); + cp.max_ce_len = cpu_to_le16(0x0000); hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); } @@ -86,7 +99,7 @@ void hci_acl_connect(struct hci_conn *conn) cp.pscan_rep_mode = ie->data.pscan_rep_mode; cp.pscan_mode = ie->data.pscan_mode; cp.clock_offset = ie->data.clock_offset | - __constant_cpu_to_le16(0x8000); + cpu_to_le16(0x8000); } memcpy(conn->dev_class, ie->data.dev_class, 3); @@ -162,9 +175,9 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle) cp.handle = cpu_to_le16(handle); cp.pkt_type = cpu_to_le16(conn->pkt_type); - cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40); - cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40); - cp.max_latency = __constant_cpu_to_le16(0xffff); + cp.tx_bandwidth = cpu_to_le32(0x00001f40); + cp.rx_bandwidth = cpu_to_le32(0x00001f40); + cp.max_latency = cpu_to_le16(0xffff); cp.voice_setting = cpu_to_le16(hdev->voice_setting); cp.retrans_effort = 0xff; @@ -172,7 +185,7 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle) } void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, - u16 latency, u16 to_multiplier) + u16 latency, u16 to_multiplier) { struct hci_cp_le_conn_update cp; struct hci_dev *hdev = conn->hdev; @@ -184,14 +197,15 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, cp.conn_interval_max = cpu_to_le16(max); cp.conn_latency = cpu_to_le16(latency); cp.supervision_timeout = cpu_to_le16(to_multiplier); - cp.min_ce_len = __constant_cpu_to_le16(0x0001); - cp.max_ce_len = __constant_cpu_to_le16(0x0001); + cp.min_ce_len = cpu_to_le16(0x0001); + cp.max_ce_len = cpu_to_le16(0x0001); hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp); } +EXPORT_SYMBOL(hci_le_conn_update); void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], - __u8 ltk[16]) + __u8 ltk[16]) { struct hci_dev *hdev = conn->hdev; struct hci_cp_le_start_enc cp; @@ -207,6 +221,7 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp); } +EXPORT_SYMBOL(hci_le_start_enc); /* Device _must_ be locked */ void hci_sco_setup(struct hci_conn *conn, __u8 status) @@ -232,7 +247,7 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status) static void hci_conn_timeout(struct work_struct *work) { struct hci_conn *conn = container_of(work, struct hci_conn, - disc_work.work); + disc_work.work); __u8 reason; BT_DBG("conn %p state %s", conn, state_to_string(conn->state)); @@ -280,9 +295,9 @@ static void 
hci_conn_enter_sniff_mode(struct hci_conn *conn) if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) { struct hci_cp_sniff_subrate cp; cp.handle = cpu_to_le16(conn->handle); - cp.max_latency = __constant_cpu_to_le16(0); - cp.min_remote_timeout = __constant_cpu_to_le16(0); - cp.min_local_timeout = __constant_cpu_to_le16(0); + cp.max_latency = cpu_to_le16(0); + cp.min_remote_timeout = cpu_to_le16(0); + cp.min_local_timeout = cpu_to_le16(0); hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp); } @@ -291,8 +306,8 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn) cp.handle = cpu_to_le16(conn->handle); cp.max_interval = cpu_to_le16(hdev->sniff_max_interval); cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); - cp.attempt = __constant_cpu_to_le16(4); - cp.timeout = __constant_cpu_to_le16(1); + cp.attempt = cpu_to_le16(4); + cp.timeout = cpu_to_le16(1); hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp); } } @@ -312,7 +327,7 @@ static void hci_conn_auto_accept(unsigned long arg) struct hci_dev *hdev = conn->hdev; hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst), - &conn->dst); + &conn->dst); } struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) @@ -361,7 +376,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout); setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn); setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept, - (unsigned long) conn); + (unsigned long) conn); atomic_set(&conn->refcnt, 0); @@ -410,10 +425,8 @@ int hci_conn_del(struct hci_conn *conn) } } - hci_chan_list_flush(conn); - if (conn->amp_mgr) - amp_mgr_put(conn->amp_mgr); + hci_chan_list_flush(conn); hci_conn_hash_del(hdev, conn); if (hdev->notify) @@ -441,8 +454,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src) read_lock(&hci_dev_list_lock); list_for_each_entry(d, &hci_dev_list, list) { - if (!test_bit(HCI_UP, &d->flags) || - test_bit(HCI_RAW, &d->flags)) + if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags)) continue; /* Simple routing: @@ -483,11 +495,6 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, if (type == LE_LINK) { le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); if (!le) { - le = hci_conn_hash_lookup_state(hdev, LE_LINK, - BT_CONNECT); - if (le) - return ERR_PTR(-EBUSY); - le = hci_conn_add(hdev, LE_LINK, dst); if (!le) return ERR_PTR(-ENOMEM); @@ -538,7 +545,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, hci_conn_hold(sco); if (acl->state == BT_CONNECTED && - (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { + (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { set_bit(HCI_CONN_POWER_SAVE, &acl->flags); hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON); @@ -553,6 +560,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, return sco; } +EXPORT_SYMBOL(hci_connect); /* Check link security requirement */ int hci_conn_check_link_mode(struct hci_conn *conn) @@ -564,6 +572,7 @@ int hci_conn_check_link_mode(struct hci_conn *conn) return 1; } +EXPORT_SYMBOL(hci_conn_check_link_mode); /* Authenticate remote device */ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) @@ -591,7 +600,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) cp.handle = cpu_to_le16(conn->handle); hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, - sizeof(cp), &cp); + sizeof(cp), 
&cp); if (conn->key_type != 0xff) set_bit(HCI_CONN_REAUTH_PEND, &conn->flags); } @@ -609,7 +618,7 @@ static void hci_conn_encrypt(struct hci_conn *conn) cp.handle = cpu_to_le16(conn->handle); cp.encrypt = 0x01; hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), - &cp); + &cp); } } @@ -639,7 +648,8 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) /* An unauthenticated combination key has sufficient security for security level 1 and 2. */ if (conn->key_type == HCI_LK_UNAUTH_COMBINATION && - (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW)) + (sec_level == BT_SECURITY_MEDIUM || + sec_level == BT_SECURITY_LOW)) goto encrypt; /* A combination key has always sufficient security for the security @@ -647,7 +657,8 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) is generated using maximum PIN code length (16). For pre 2.1 units. */ if (conn->key_type == HCI_LK_COMBINATION && - (sec_level != BT_SECURITY_HIGH || conn->pin_length == 16)) + (sec_level != BT_SECURITY_HIGH || + conn->pin_length == 16)) goto encrypt; auth: @@ -690,11 +701,12 @@ int hci_conn_change_link_key(struct hci_conn *conn) struct hci_cp_change_conn_link_key cp; cp.handle = cpu_to_le16(conn->handle); hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY, - sizeof(cp), &cp); + sizeof(cp), &cp); } return 0; } +EXPORT_SYMBOL(hci_conn_change_link_key); /* Switch role */ int hci_conn_switch_role(struct hci_conn *conn, __u8 role) @@ -740,7 +752,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active) timer: if (hdev->idle_timeout > 0) mod_timer(&conn->idle_timer, - jiffies + msecs_to_jiffies(hdev->idle_timeout)); + jiffies + msecs_to_jiffies(hdev->idle_timeout)); } /* Drop all connection on the device */ @@ -790,7 +802,7 @@ EXPORT_SYMBOL(hci_conn_put_device); int hci_get_conn_list(void __user *arg) { - struct hci_conn *c; + register struct hci_conn *c; struct hci_conn_list_req req, *cl; struct hci_conn_info *ci; struct hci_dev *hdev; diff --git a/trunk/net/bluetooth/hci_core.c b/trunk/net/bluetooth/hci_core.c index 08994ecc3b6a..411ace8e647b 100644 --- a/trunk/net/bluetooth/hci_core.c +++ b/trunk/net/bluetooth/hci_core.c @@ -25,10 +25,28 @@ /* Bluetooth HCI core. */ -#include -#include - +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include + +#include +#include #include #include @@ -47,9 +65,6 @@ DEFINE_RWLOCK(hci_dev_list_lock); LIST_HEAD(hci_cb_list); DEFINE_RWLOCK(hci_cb_list_lock); -/* HCI ID Numbering */ -static DEFINE_IDA(hci_index_ida); - /* ---- HCI notifications ---- */ static void hci_notify(struct hci_dev *hdev, int event) @@ -109,9 +124,8 @@ static void hci_req_cancel(struct hci_dev *hdev, int err) } /* Execute request and wait for completion. 
*/ -static int __hci_request(struct hci_dev *hdev, - void (*req)(struct hci_dev *hdev, unsigned long opt), - unsigned long opt, __u32 timeout) +static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), + unsigned long opt, __u32 timeout) { DECLARE_WAITQUEUE(wait, current); int err = 0; @@ -152,9 +166,8 @@ static int __hci_request(struct hci_dev *hdev, return err; } -static int hci_request(struct hci_dev *hdev, - void (*req)(struct hci_dev *hdev, unsigned long opt), - unsigned long opt, __u32 timeout) +static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), + unsigned long opt, __u32 timeout) { int ret; @@ -189,7 +202,7 @@ static void bredr_init(struct hci_dev *hdev) /* Mandatory initialization */ /* Reset */ - if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { + if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) { set_bit(HCI_RESET, &hdev->flags); hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); } @@ -222,7 +235,7 @@ static void bredr_init(struct hci_dev *hdev) hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type); /* Connection accept timeout ~20 secs */ - param = __constant_cpu_to_le16(0x7d00); + param = cpu_to_le16(0x7d00); hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m); bacpy(&cp.bdaddr, BDADDR_ANY); @@ -404,8 +417,7 @@ static void inquiry_cache_flush(struct hci_dev *hdev) INIT_LIST_HEAD(&cache->resolve); } -struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, - bdaddr_t *bdaddr) +struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct discovery_state *cache = &hdev->discovery; struct inquiry_entry *e; @@ -466,7 +478,7 @@ void hci_inquiry_cache_update_resolve(struct hci_dev *hdev, list_for_each_entry(p, &cache->resolve, list) { if (p->name_state != NAME_PENDING && - abs(p->data.rssi) >= abs(ie->data.rssi)) + abs(p->data.rssi) >= abs(ie->data.rssi)) break; pos = &p->list; } @@ -491,7 +503,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, *ssp = true; if (ie->name_state == NAME_NEEDED && - data->rssi != ie->data.rssi) { + data->rssi != ie->data.rssi) { ie->data.rssi = data->rssi; hci_inquiry_cache_update_resolve(hdev, ie); } @@ -515,7 +527,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, update: if (name_known && ie->name_state != NAME_KNOWN && - ie->name_state != NAME_PENDING) { + ie->name_state != NAME_PENDING) { ie->name_state = NAME_KNOWN; list_del(&ie->list); } @@ -593,7 +605,8 @@ int hci_inquiry(void __user *arg) hci_dev_lock(hdev); if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || - inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) { + inquiry_cache_empty(hdev) || + ir.flags & IREQ_CACHE_FLUSH) { inquiry_cache_flush(hdev); do_inquiry = 1; } @@ -607,9 +620,7 @@ int hci_inquiry(void __user *arg) goto done; } - /* for unlimited number of responses we will use buffer with - * 255 entries - */ + /* for unlimited number of responses we will use buffer with 255 entries */ max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp; /* cache_dump can't sleep. 
Therefore we allocate temp buffer and then @@ -630,7 +641,7 @@ int hci_inquiry(void __user *arg) if (!copy_to_user(ptr, &ir, sizeof(ir))) { ptr += sizeof(ir); if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) * - ir.num_rsp)) + ir.num_rsp)) err = -EFAULT; } else err = -EFAULT; @@ -691,11 +702,11 @@ int hci_dev_open(__u16 dev) hdev->init_last_cmd = 0; ret = __hci_request(hdev, hci_init_req, 0, - msecs_to_jiffies(HCI_INIT_TIMEOUT)); + msecs_to_jiffies(HCI_INIT_TIMEOUT)); if (lmp_host_le_capable(hdev)) ret = __hci_request(hdev, hci_le_init_req, 0, - msecs_to_jiffies(HCI_INIT_TIMEOUT)); + msecs_to_jiffies(HCI_INIT_TIMEOUT)); clear_bit(HCI_INIT, &hdev->flags); } @@ -780,10 +791,10 @@ static int hci_dev_do_close(struct hci_dev *hdev) skb_queue_purge(&hdev->cmd_q); atomic_set(&hdev->cmd_cnt, 1); if (!test_bit(HCI_RAW, &hdev->flags) && - test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { + test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) { set_bit(HCI_INIT, &hdev->flags); __hci_request(hdev, hci_reset_req, 0, - msecs_to_jiffies(250)); + msecs_to_jiffies(250)); clear_bit(HCI_INIT, &hdev->flags); } @@ -873,7 +884,7 @@ int hci_dev_reset(__u16 dev) if (!test_bit(HCI_RAW, &hdev->flags)) ret = __hci_request(hdev, hci_reset_req, 0, - msecs_to_jiffies(HCI_INIT_TIMEOUT)); + msecs_to_jiffies(HCI_INIT_TIMEOUT)); done: hci_req_unlock(hdev); @@ -913,7 +924,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) switch (cmd) { case HCISETAUTH: err = hci_request(hdev, hci_auth_req, dr.dev_opt, - msecs_to_jiffies(HCI_INIT_TIMEOUT)); + msecs_to_jiffies(HCI_INIT_TIMEOUT)); break; case HCISETENCRYPT: @@ -925,23 +936,23 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) if (!test_bit(HCI_AUTH, &hdev->flags)) { /* Auth must be enabled first */ err = hci_request(hdev, hci_auth_req, dr.dev_opt, - msecs_to_jiffies(HCI_INIT_TIMEOUT)); + msecs_to_jiffies(HCI_INIT_TIMEOUT)); if (err) break; } err = hci_request(hdev, hci_encrypt_req, dr.dev_opt, - msecs_to_jiffies(HCI_INIT_TIMEOUT)); + msecs_to_jiffies(HCI_INIT_TIMEOUT)); break; case HCISETSCAN: err = hci_request(hdev, hci_scan_req, dr.dev_opt, - msecs_to_jiffies(HCI_INIT_TIMEOUT)); + msecs_to_jiffies(HCI_INIT_TIMEOUT)); break; case HCISETLINKPOL: err = hci_request(hdev, hci_linkpol_req, dr.dev_opt, - msecs_to_jiffies(HCI_INIT_TIMEOUT)); + msecs_to_jiffies(HCI_INIT_TIMEOUT)); break; case HCISETLINKMODE: @@ -1092,7 +1103,7 @@ static void hci_power_on(struct work_struct *work) if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) schedule_delayed_work(&hdev->power_off, - msecs_to_jiffies(AUTO_OFF_TIMEOUT)); + msecs_to_jiffies(AUTO_OFF_TIMEOUT)); if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) mgmt_index_added(hdev); @@ -1101,7 +1112,7 @@ static void hci_power_on(struct work_struct *work) static void hci_power_off(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, - power_off.work); + power_off.work); BT_DBG("%s", hdev->name); @@ -1182,7 +1193,7 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) } static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, - u8 key_type, u8 old_key_type) + u8 key_type, u8 old_key_type) { /* Legacy key */ if (key_type < 0x03) @@ -1223,7 +1234,7 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]) list_for_each_entry(k, &hdev->long_term_keys, list) { if (k->ediv != ediv || - memcmp(rand, k->rand, sizeof(k->rand))) + memcmp(rand, k->rand, sizeof(k->rand))) continue; return k; @@ -1231,6 +1242,7 @@ struct smp_ltk *hci_find_ltk(struct hci_dev 
*hdev, __le16 ediv, u8 rand[8]) return NULL; } +EXPORT_SYMBOL(hci_find_ltk); struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) @@ -1239,11 +1251,12 @@ struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, list_for_each_entry(k, &hdev->long_term_keys, list) if (addr_type == k->bdaddr_type && - bacmp(bdaddr, &k->bdaddr) == 0) + bacmp(bdaddr, &k->bdaddr) == 0) return k; return NULL; } +EXPORT_SYMBOL(hci_find_ltk_by_addr); int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len) @@ -1270,14 +1283,15 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, * combination key for legacy pairing even when there's no * previous key */ if (type == HCI_LK_CHANGED_COMBINATION && - (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) { + (!conn || conn->remote_auth == 0xff) && + old_key_type == 0xff) { type = HCI_LK_COMBINATION; if (conn) conn->key_type = type; } bacpy(&key->bdaddr, bdaddr); - memcpy(key->val, val, HCI_LINK_KEY_SIZE); + memcpy(key->val, val, 16); key->pin_len = pin_len; if (type == HCI_LK_CHANGED_COMBINATION) @@ -1526,7 +1540,6 @@ static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt) memset(&cp, 0, sizeof(cp)); cp.enable = 1; - cp.filter_dup = 1; hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); } @@ -1694,39 +1707,41 @@ EXPORT_SYMBOL(hci_free_dev); /* Register HCI device */ int hci_register_dev(struct hci_dev *hdev) { + struct list_head *head, *p; int id, error; if (!hdev->open || !hdev->close) return -EINVAL; + write_lock(&hci_dev_list_lock); + /* Do not allow HCI_AMP devices to register at index 0, * so the index can be used as the AMP controller ID. */ - switch (hdev->dev_type) { - case HCI_BREDR: - id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL); - break; - case HCI_AMP: - id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL); - break; - default: - return -EINVAL; - } + id = (hdev->dev_type == HCI_BREDR) ? 
0 : 1; + head = &hci_dev_list; - if (id < 0) - return id; + /* Find first available device id */ + list_for_each(p, &hci_dev_list) { + int nid = list_entry(p, struct hci_dev, list)->id; + if (nid > id) + break; + if (nid == id) + id++; + head = p; + } sprintf(hdev->name, "hci%d", id); hdev->id = id; BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); - write_lock(&hci_dev_list_lock); - list_add(&hdev->list, &hci_dev_list); + list_add(&hdev->list, head); + write_unlock(&hci_dev_list_lock); hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND | - WQ_MEM_RECLAIM, 1); + WQ_MEM_RECLAIM, 1); if (!hdev->workqueue) { error = -ENOMEM; goto err; @@ -1737,8 +1752,7 @@ int hci_register_dev(struct hci_dev *hdev) goto err_wqueue; hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, - RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, - hdev); + RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev); if (hdev->rfkill) { if (rfkill_register(hdev->rfkill) < 0) { rfkill_destroy(hdev->rfkill); @@ -1758,7 +1772,6 @@ int hci_register_dev(struct hci_dev *hdev) err_wqueue: destroy_workqueue(hdev->workqueue); err: - ida_simple_remove(&hci_index_ida, hdev->id); write_lock(&hci_dev_list_lock); list_del(&hdev->list); write_unlock(&hci_dev_list_lock); @@ -1770,14 +1783,12 @@ EXPORT_SYMBOL(hci_register_dev); /* Unregister HCI device */ void hci_unregister_dev(struct hci_dev *hdev) { - int i, id; + int i; BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); set_bit(HCI_UNREGISTER, &hdev->dev_flags); - id = hdev->id; - write_lock(&hci_dev_list_lock); list_del(&hdev->list); write_unlock(&hci_dev_list_lock); @@ -1788,7 +1799,7 @@ void hci_unregister_dev(struct hci_dev *hdev) kfree_skb(hdev->reassembly[i]); if (!test_bit(HCI_INIT, &hdev->flags) && - !test_bit(HCI_SETUP, &hdev->dev_flags)) { + !test_bit(HCI_SETUP, &hdev->dev_flags)) { hci_dev_lock(hdev); mgmt_index_removed(hdev); hci_dev_unlock(hdev); @@ -1818,8 +1829,6 @@ void hci_unregister_dev(struct hci_dev *hdev) hci_dev_unlock(hdev); hci_dev_put(hdev); - - ida_simple_remove(&hci_index_ida, id); } EXPORT_SYMBOL(hci_unregister_dev); @@ -1844,7 +1853,7 @@ int hci_recv_frame(struct sk_buff *skb) { struct hci_dev *hdev = (struct hci_dev *) skb->dev; if (!hdev || (!test_bit(HCI_UP, &hdev->flags) - && !test_bit(HCI_INIT, &hdev->flags))) { + && !test_bit(HCI_INIT, &hdev->flags))) { kfree_skb(skb); return -ENXIO; } @@ -1863,7 +1872,7 @@ int hci_recv_frame(struct sk_buff *skb) EXPORT_SYMBOL(hci_recv_frame); static int hci_reassembly(struct hci_dev *hdev, int type, void *data, - int count, __u8 index) + int count, __u8 index) { int len = 0; int hlen = 0; @@ -1872,7 +1881,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data, struct bt_skb_cb *scb; if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) || - index >= NUM_REASSEMBLY) + index >= NUM_REASSEMBLY) return -EILSEQ; skb = hdev->reassembly[index]; @@ -2014,7 +2023,7 @@ int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count) type = bt_cb(skb)->pkt_type; rem = hci_reassembly(hdev, type, data, count, - STREAM_REASSEMBLY); + STREAM_REASSEMBLY); if (rem < 0) return rem; @@ -2148,7 +2157,7 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) } static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue, - struct sk_buff *skb, __u16 flags) + struct sk_buff *skb, __u16 flags) { struct hci_dev *hdev = conn->hdev; struct sk_buff *list; @@ -2207,6 +2216,7 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) 
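  The hci_register_dev() hunk above replaces IDA-based index allocation with a walk over hci_dev_list, which is kept sorted by id, picking the first unused index at or above the starting value (BR/EDR controllers start at 0, AMP controllers at 1, so that index 0 is never used as an AMP controller ID). The following standalone sketch shows the same first-fit search written over a plain sorted array instead of the kernel list; first_free_id() and its parameters are illustrative names, not kernel API.

    #include <stddef.h>

    /* 'used' holds the already-allocated ids in ascending order.
     * Returns the smallest id >= start that does not appear in the array. */
    static int first_free_id(const int *used, size_t n, int start)
    {
            int id = start;
            size_t i;

            for (i = 0; i < n; i++) {
                    if (used[i] > id)
                            break;          /* found a gap below used[i] */
                    if (used[i] == id)
                            id++;           /* id is taken, try the next one */
            }

            return id;
    }

  With used = {0, 1, 3} and start = 0 this returns 2, the first gap in the sequence; with used = {0} and start = 1 (the AMP case) it returns 1.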
queue_work(hdev->workqueue, &hdev->tx_work); } +EXPORT_SYMBOL(hci_send_acl); /* Send SCO data */ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) @@ -2229,12 +2239,12 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) skb_queue_tail(&conn->data_q, skb); queue_work(hdev->workqueue, &hdev->tx_work); } +EXPORT_SYMBOL(hci_send_sco); /* ---- HCI TX task (outgoing data) ---- */ /* HCI Connection scheduler */ -static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, - int *quote) +static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *conn = NULL, *c; @@ -2293,7 +2303,7 @@ static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, return conn; } -static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) +static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; @@ -2306,16 +2316,16 @@ static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) list_for_each_entry_rcu(c, &h->list, list) { if (c->type == type && c->sent) { BT_ERR("%s killing stalled connection %s", - hdev->name, batostr(&c->dst)); - hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM); + hdev->name, batostr(&c->dst)); + hci_acl_disconn(c, 0x13); } } rcu_read_unlock(); } -static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, - int *quote) +static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, + int *quote) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_chan *chan = NULL; @@ -2432,7 +2442,7 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type) skb->priority = HCI_PRIO_MAX - 1; BT_DBG("chan %p skb %p promoted to %d", chan, skb, - skb->priority); + skb->priority); } if (hci_conn_num(hdev, type) == num) @@ -2449,18 +2459,18 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb) return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len); } -static void __check_timeout(struct hci_dev *hdev, unsigned int cnt) +static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt) { if (!test_bit(HCI_RAW, &hdev->flags)) { /* ACL tx timeout must be longer than maximum * link supervision timeout (40.9 seconds) */ if (!cnt && time_after(jiffies, hdev->acl_last_tx + - msecs_to_jiffies(HCI_ACL_TX_TIMEOUT))) + msecs_to_jiffies(HCI_ACL_TX_TIMEOUT))) hci_link_tx_to(hdev, ACL_LINK); } } -static void hci_sched_acl_pkt(struct hci_dev *hdev) +static inline void hci_sched_acl_pkt(struct hci_dev *hdev) { unsigned int cnt = hdev->acl_cnt; struct hci_chan *chan; @@ -2470,11 +2480,11 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev) __check_timeout(hdev, cnt); while (hdev->acl_cnt && - (chan = hci_chan_sent(hdev, ACL_LINK, "e))) { + (chan = hci_chan_sent(hdev, ACL_LINK, "e))) { u32 priority = (skb_peek(&chan->data_q))->priority; while (quote-- && (skb = skb_peek(&chan->data_q))) { BT_DBG("chan %p skb %p len %d priority %u", chan, skb, - skb->len, skb->priority); + skb->len, skb->priority); /* Stop if priority has changed */ if (skb->priority < priority) @@ -2498,7 +2508,7 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev) hci_prio_recalculate(hdev, ACL_LINK); } -static void hci_sched_acl_blk(struct hci_dev *hdev) +static inline void hci_sched_acl_blk(struct hci_dev *hdev) { unsigned int cnt = hdev->block_cnt; struct hci_chan *chan; @@ -2508,13 +2518,13 @@ static void hci_sched_acl_blk(struct hci_dev *hdev) __check_timeout(hdev, cnt); 
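  The hci_sched_acl_pkt() and hci_sched_acl_blk() hunks drain per-channel transmit queues under a quota handed back by hci_chan_sent(), and they stop a pass early as soon as the head packet's priority falls below the priority observed when the pass began, so lower-priority traffic waits for the next round. A simplified, self-contained sketch of that inner loop is shown below; the pkt array and the send() callback stand in for the skb queue and hci_send_frame(), and all names here are illustrative.

    #include <stddef.h>

    struct pkt {
            unsigned int priority;
            unsigned int len;
    };

    /* Send at most 'quota' packets from the head of 'q', stopping early if
     * the head priority drops below the priority seen at the start of the
     * pass. Returns the number of packets consumed. */
    static size_t drain_queue(const struct pkt *q, size_t n, unsigned int quota,
                              void (*send)(const struct pkt *))
    {
            unsigned int start_prio;
            size_t i = 0;

            if (n == 0)
                    return 0;

            start_prio = q[0].priority;

            while (i < n && quota > 0) {
                    if (q[i].priority < start_prio)
                            break;          /* lower-priority traffic waits */

                    send(&q[i]);
                    i++;
                    quota--;
            }

            return i;
    }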
while (hdev->block_cnt > 0 && - (chan = hci_chan_sent(hdev, ACL_LINK, "e))) { + (chan = hci_chan_sent(hdev, ACL_LINK, "e))) { u32 priority = (skb_peek(&chan->data_q))->priority; while (quote > 0 && (skb = skb_peek(&chan->data_q))) { int blocks; BT_DBG("chan %p skb %p len %d priority %u", chan, skb, - skb->len, skb->priority); + skb->len, skb->priority); /* Stop if priority has changed */ if (skb->priority < priority) @@ -2527,7 +2537,7 @@ static void hci_sched_acl_blk(struct hci_dev *hdev) return; hci_conn_enter_active_mode(chan->conn, - bt_cb(skb)->force_active); + bt_cb(skb)->force_active); hci_send_frame(skb); hdev->acl_last_tx = jiffies; @@ -2544,7 +2554,7 @@ static void hci_sched_acl_blk(struct hci_dev *hdev) hci_prio_recalculate(hdev, ACL_LINK); } -static void hci_sched_acl(struct hci_dev *hdev) +static inline void hci_sched_acl(struct hci_dev *hdev) { BT_DBG("%s", hdev->name); @@ -2563,7 +2573,7 @@ static void hci_sched_acl(struct hci_dev *hdev) } /* Schedule SCO */ -static void hci_sched_sco(struct hci_dev *hdev) +static inline void hci_sched_sco(struct hci_dev *hdev) { struct hci_conn *conn; struct sk_buff *skb; @@ -2586,7 +2596,7 @@ static void hci_sched_sco(struct hci_dev *hdev) } } -static void hci_sched_esco(struct hci_dev *hdev) +static inline void hci_sched_esco(struct hci_dev *hdev) { struct hci_conn *conn; struct sk_buff *skb; @@ -2597,8 +2607,7 @@ static void hci_sched_esco(struct hci_dev *hdev) if (!hci_conn_num(hdev, ESCO_LINK)) return; - while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, - "e))) { + while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, "e))) { while (quote-- && (skb = skb_dequeue(&conn->data_q))) { BT_DBG("skb %p len %d", skb, skb->len); hci_send_frame(skb); @@ -2610,7 +2619,7 @@ static void hci_sched_esco(struct hci_dev *hdev) } } -static void hci_sched_le(struct hci_dev *hdev) +static inline void hci_sched_le(struct hci_dev *hdev) { struct hci_chan *chan; struct sk_buff *skb; @@ -2625,7 +2634,7 @@ static void hci_sched_le(struct hci_dev *hdev) /* LE tx timeout must be longer than maximum * link supervision timeout (40.9 seconds) */ if (!hdev->le_cnt && hdev->le_pkts && - time_after(jiffies, hdev->le_last_tx + HZ * 45)) + time_after(jiffies, hdev->le_last_tx + HZ * 45)) hci_link_tx_to(hdev, LE_LINK); } @@ -2635,7 +2644,7 @@ static void hci_sched_le(struct hci_dev *hdev) u32 priority = (skb_peek(&chan->data_q))->priority; while (quote-- && (skb = skb_peek(&chan->data_q))) { BT_DBG("chan %p skb %p len %d priority %u", chan, skb, - skb->len, skb->priority); + skb->len, skb->priority); /* Stop if priority has changed */ if (skb->priority < priority) @@ -2667,7 +2676,7 @@ static void hci_tx_work(struct work_struct *work) struct sk_buff *skb; BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt, - hdev->sco_cnt, hdev->le_cnt); + hdev->sco_cnt, hdev->le_cnt); /* Schedule queues and send stuff to HCI driver */ @@ -2687,7 +2696,7 @@ static void hci_tx_work(struct work_struct *work) /* ----- HCI RX task (incoming data processing) ----- */ /* ACL data packet */ -static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_acl_hdr *hdr = (void *) skb->data; struct hci_conn *conn; @@ -2699,8 +2708,7 @@ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) flags = hci_flags(handle); handle = hci_handle(handle); - BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, - handle, flags); + BT_DBG("%s len %d 
handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags); hdev->stat.acl_rx++; @@ -2724,14 +2732,14 @@ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) return; } else { BT_ERR("%s ACL packet for unknown connection handle %d", - hdev->name, handle); + hdev->name, handle); } kfree_skb(skb); } /* SCO data packet */ -static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_sco_hdr *hdr = (void *) skb->data; struct hci_conn *conn; @@ -2755,7 +2763,7 @@ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) return; } else { BT_ERR("%s SCO packet for unknown connection handle %d", - hdev->name, handle); + hdev->name, handle); } kfree_skb(skb); diff --git a/trunk/net/bluetooth/hci_event.c b/trunk/net/bluetooth/hci_event.c index 1ba929c05d0d..4eefb7f65cf6 100644 --- a/trunk/net/bluetooth/hci_event.c +++ b/trunk/net/bluetooth/hci_event.c @@ -24,7 +24,20 @@ /* Bluetooth HCI event handling. */ -#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include #include #include @@ -82,8 +95,7 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) hci_conn_check_pending(hdev); } -static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, - struct sk_buff *skb) +static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb) { BT_DBG("%s", hdev->name); } @@ -154,8 +166,7 @@ static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_cc_read_def_link_policy(struct hci_dev *hdev, - struct sk_buff *skb) +static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_def_link_policy *rp = (void *) skb->data; @@ -167,8 +178,7 @@ static void hci_cc_read_def_link_policy(struct hci_dev *hdev, hdev->link_policy = __le16_to_cpu(rp->policy); } -static void hci_cc_write_def_link_policy(struct hci_dev *hdev, - struct sk_buff *skb) +static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); void *sent; @@ -319,7 +329,7 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) if (hdev->discov_timeout > 0) { int to = msecs_to_jiffies(hdev->discov_timeout * 1000); queue_delayed_work(hdev->workqueue, &hdev->discov_off, - to); + to); } } else if (old_iscan) mgmt_discoverable(hdev, 0); @@ -348,7 +358,7 @@ static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) memcpy(hdev->dev_class, rp->dev_class, 3); BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name, - hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); + hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); } static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) @@ -396,8 +406,7 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); } -static void hci_cc_write_voice_setting(struct hci_dev *hdev, - struct sk_buff *skb) +static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); __u16 setting; @@ -464,7 +473,7 @@ static u8 hci_get_inquiry_mode(struct hci_dev *hdev) return 1; if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 && - hdev->lmp_subver == 0x0757) + hdev->lmp_subver == 0x0757) return 1; if 
(hdev->manufacturer == 15) { @@ -477,7 +486,7 @@ static u8 hci_get_inquiry_mode(struct hci_dev *hdev) } if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 && - hdev->lmp_subver == 0x1805) + hdev->lmp_subver == 0x1805) return 1; return 0; @@ -557,7 +566,7 @@ static void hci_setup(struct hci_dev *hdev) if (hdev->hci_ver > BLUETOOTH_VER_1_1) hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); - if (lmp_ssp_capable(hdev)) { + if (hdev->features[6] & LMP_SIMPLE_PAIR) { if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { u8 mode = 0x01; hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, @@ -609,7 +618,8 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name, - hdev->manufacturer, hdev->hci_ver, hdev->hci_rev); + hdev->manufacturer, + hdev->hci_ver, hdev->hci_rev); if (test_bit(HCI_INIT, &hdev->flags)) hci_setup(hdev); @@ -636,8 +646,7 @@ static void hci_setup_link_policy(struct hci_dev *hdev) hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp); } -static void hci_cc_read_local_commands(struct hci_dev *hdev, - struct sk_buff *skb) +static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_local_commands *rp = (void *) skb->data; @@ -655,8 +664,7 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev, hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status); } -static void hci_cc_read_local_features(struct hci_dev *hdev, - struct sk_buff *skb) +static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_local_features *rp = (void *) skb->data; @@ -705,10 +713,10 @@ static void hci_cc_read_local_features(struct hci_dev *hdev, hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name, - hdev->features[0], hdev->features[1], - hdev->features[2], hdev->features[3], - hdev->features[4], hdev->features[5], - hdev->features[6], hdev->features[7]); + hdev->features[0], hdev->features[1], + hdev->features[2], hdev->features[3], + hdev->features[4], hdev->features[5], + hdev->features[6], hdev->features[7]); } static void hci_set_le_support(struct hci_dev *hdev) @@ -728,7 +736,7 @@ static void hci_set_le_support(struct hci_dev *hdev) } static void hci_cc_read_local_ext_features(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { struct hci_rp_read_local_ext_features *rp = (void *) skb->data; @@ -754,7 +762,7 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev, } static void hci_cc_read_flow_control_mode(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { struct hci_rp_read_flow_control_mode *rp = (void *) skb->data; @@ -790,8 +798,9 @@ static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) hdev->acl_cnt = hdev->acl_pkts; hdev->sco_cnt = hdev->sco_pkts; - BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu, - hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts); + BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, + hdev->acl_mtu, hdev->acl_pkts, + hdev->sco_mtu, hdev->sco_pkts); } static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) @@ -807,7 +816,7 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) } static void hci_cc_read_data_block_size(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { struct hci_rp_read_data_block_size *rp = (void *) skb->data; @@ 
-823,7 +832,7 @@ static void hci_cc_read_data_block_size(struct hci_dev *hdev, hdev->block_cnt = hdev->num_blocks; BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, - hdev->block_cnt, hdev->block_len); + hdev->block_cnt, hdev->block_len); hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status); } @@ -838,7 +847,7 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb) } static void hci_cc_read_local_amp_info(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { struct hci_rp_read_local_amp_info *rp = (void *) skb->data; @@ -862,7 +871,7 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev, } static void hci_cc_delete_stored_link_key(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); @@ -881,7 +890,7 @@ static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb) } static void hci_cc_write_inquiry_mode(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); @@ -891,7 +900,7 @@ static void hci_cc_write_inquiry_mode(struct hci_dev *hdev, } static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data; @@ -950,7 +959,7 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb) if (test_bit(HCI_MGMT, &hdev->dev_flags)) mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, - rp->status); + rp->status); hci_dev_unlock(hdev); } @@ -991,7 +1000,7 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) } static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { struct hci_rp_user_confirm_reply *rp = (void *) skb->data; @@ -1022,7 +1031,7 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb) } static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { struct hci_rp_user_confirm_reply *rp = (void *) skb->data; @@ -1038,7 +1047,7 @@ static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, } static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { struct hci_rp_read_local_oob_data *rp = (void *) skb->data; @@ -1067,7 +1076,7 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb) } static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { struct hci_cp_le_set_scan_enable *cp; __u8 status = *((__u8 *) skb->data); @@ -1147,8 +1156,8 @@ static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb) hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status); } -static void hci_cc_write_le_host_supported(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev, + struct sk_buff *skb) { struct hci_cp_write_le_host_supported *sent; __u8 status = *((__u8 *) skb->data); @@ -1167,13 +1176,13 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev, } if (test_bit(HCI_MGMT, &hdev->dev_flags) && - !test_bit(HCI_INIT, &hdev->flags)) + !test_bit(HCI_INIT, &hdev->flags)) mgmt_le_enable_complete(hdev, sent->le, status); hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status); } -static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) +static inline void hci_cs_inquiry(struct hci_dev *hdev, 
__u8 status) { BT_DBG("%s status 0x%x", hdev->name, status); @@ -1194,7 +1203,7 @@ static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) hci_dev_unlock(hdev); } -static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) +static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) { struct hci_cp_create_conn *cp; struct hci_conn *conn; @@ -1324,7 +1333,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) } static int hci_outgoing_auth_needed(struct hci_dev *hdev, - struct hci_conn *conn) + struct hci_conn *conn) { if (conn->state != BT_CONFIG || !conn->out) return 0; @@ -1334,14 +1343,15 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev, /* Only request authentication for SSP connections or non-SSP * devices with sec_level HIGH or if MITM protection is requested */ - if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) && - conn->pending_sec_level != BT_SECURITY_HIGH) + if (!hci_conn_ssp_enabled(conn) && + conn->pending_sec_level != BT_SECURITY_HIGH && + !(conn->auth_type & 0x01)) return 0; return 1; } -static int hci_resolve_name(struct hci_dev *hdev, +static inline int hci_resolve_name(struct hci_dev *hdev, struct inquiry_entry *e) { struct hci_cp_remote_name_req cp; @@ -1628,7 +1638,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status) conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr); BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr), - conn); + conn); if (status) { if (conn && conn->state == BT_CONNECT) { @@ -1658,7 +1668,7 @@ static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) BT_DBG("%s status 0x%x", hdev->name, status); } -static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); struct discovery_state *discov = &hdev->discovery; @@ -1698,7 +1708,7 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct inquiry_data data; struct inquiry_info *info = (void *) (skb->data + 1); @@ -1735,7 +1745,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_conn_complete *ev = (void *) skb->data; struct hci_conn *conn; @@ -1813,18 +1823,18 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_conn_check_pending(hdev); } -static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_conn_request *ev = (void *) skb->data; int mask = hdev->link_mode; - BT_DBG("%s bdaddr %s type 0x%x", hdev->name, batostr(&ev->bdaddr), - ev->link_type); + BT_DBG("%s bdaddr %s type 0x%x", hdev->name, + batostr(&ev->bdaddr), ev->link_type); mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); if ((mask & HCI_LM_ACCEPT) && - !hci_blacklist_lookup(hdev, &ev->bdaddr)) { + !hci_blacklist_lookup(hdev, &ev->bdaddr)) { /* Connection accepted */ struct inquiry_entry *ie; struct hci_conn *conn; @@ -1835,8 +1845,7 @@ static void 
hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) if (ie) memcpy(ie->data.dev_class, ev->dev_class, 3); - conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, - &ev->bdaddr); + conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); if (!conn) { conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr); if (!conn) { @@ -1869,9 +1878,9 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) bacpy(&cp.bdaddr, &ev->bdaddr); cp.pkt_type = cpu_to_le16(conn->pkt_type); - cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40); - cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40); - cp.max_latency = __constant_cpu_to_le16(0xffff); + cp.tx_bandwidth = cpu_to_le32(0x00001f40); + cp.rx_bandwidth = cpu_to_le32(0x00001f40); + cp.max_latency = cpu_to_le16(0xffff); cp.content_format = cpu_to_le16(hdev->voice_setting); cp.retrans_effort = 0xff; @@ -1888,7 +1897,7 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) } } -static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_disconn_complete *ev = (void *) skb->data; struct hci_conn *conn; @@ -1905,10 +1914,10 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) conn->state = BT_CLOSED; if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) && - (conn->type == ACL_LINK || conn->type == LE_LINK)) { + (conn->type == ACL_LINK || conn->type == LE_LINK)) { if (ev->status != 0) mgmt_disconnect_failed(hdev, &conn->dst, conn->type, - conn->dst_type, ev->status); + conn->dst_type, ev->status); else mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type); @@ -1925,7 +1934,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_auth_complete *ev = (void *) skb->data; struct hci_conn *conn; @@ -1940,7 +1949,7 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) if (!ev->status) { if (!hci_conn_ssp_enabled(conn) && - test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { + test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { BT_INFO("re-auth of legacy device is not possible."); } else { conn->link_mode |= HCI_LM_AUTH; @@ -1960,7 +1969,7 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) cp.handle = ev->handle; cp.encrypt = 0x01; hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), - &cp); + &cp); } else { conn->state = BT_CONNECTED; hci_proto_connect_cfm(conn, ev->status); @@ -1980,7 +1989,7 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) cp.handle = ev->handle; cp.encrypt = 0x01; hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), - &cp); + &cp); } else { clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); hci_encrypt_cfm(conn, ev->status, 0x00); @@ -1991,7 +2000,7 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_remote_name *ev = (void *) skb->data; struct hci_conn *conn; @@ -2030,7 +2039,7 @@ static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } 
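  Many hunks in hci_conn.c and hci_event.c above move between __constant_cpu_to_le16() and cpu_to_le16() when filling command parameters (scan intervals, bandwidths, latencies) whose fields are little-endian on the wire. The short sketch below shows the underlying idiom with a made-up parameter block; __constant_cpu_to_le16() is the variant intended for compile-time constants, while cpu_to_le16() handles constants and run-time values alike, and le16_to_cpu() converts back to host order.

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Made-up parameter block; only the endianness handling is the point. */
    struct demo_cp {
            __le16 interval;        /* little-endian on the wire */
            __le16 timeout;
    };

    static void demo_fill(struct demo_cp *cp, u16 timeout)
    {
            /* compile-time constant: __constant_cpu_to_le16() would also do */
            cp->interval = cpu_to_le16(0x0060);

            /* run-time value */
            cp->timeout = cpu_to_le16(timeout);
    }

    static u16 demo_interval(const struct demo_cp *cp)
    {
            /* convert back to host order before using the value */
            return le16_to_cpu(cp->interval);
    }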
-static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_encrypt_change *ev = (void *) skb->data; struct hci_conn *conn; @@ -2073,8 +2082,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_change_link_key_complete_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_change_link_key_complete *ev = (void *) skb->data; struct hci_conn *conn; @@ -2096,8 +2104,7 @@ static void hci_change_link_key_complete_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_remote_features_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_remote_features *ev = (void *) skb->data; struct hci_conn *conn; @@ -2121,7 +2128,7 @@ static void hci_remote_features_evt(struct hci_dev *hdev, cp.handle = ev->handle; cp.page = 0x01; hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, - sizeof(cp), &cp); + sizeof(cp), &cp); goto unlock; } @@ -2146,18 +2153,17 @@ static void hci_remote_features_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb) { BT_DBG("%s", hdev->name); } -static void hci_qos_setup_complete_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { BT_DBG("%s", hdev->name); } -static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_cmd_complete *ev = (void *) skb->data; __u16 opcode; @@ -2378,7 +2384,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) } } -static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_cmd_status *ev = (void *) skb->data; __u16 opcode; @@ -2459,7 +2465,7 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) } } -static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_role_change *ev = (void *) skb->data; struct hci_conn *conn; @@ -2485,7 +2491,7 @@ static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_num_comp_pkts *ev = (void *) skb->data; int i; @@ -2496,7 +2502,7 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) } if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) + - ev->num_hndl * sizeof(struct hci_comp_pkts_info)) { + ev->num_hndl * sizeof(struct hci_comp_pkts_info)) { BT_DBG("%s bad parameters", hdev->name); return; } @@ -2551,7 +2557,8 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) queue_work(hdev->workqueue, &hdev->tx_work); } -static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void 
hci_num_comp_blocks_evt(struct hci_dev *hdev, + struct sk_buff *skb) { struct hci_ev_num_comp_blocks *ev = (void *) skb->data; int i; @@ -2562,13 +2569,13 @@ static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb) } if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) + - ev->num_hndl * sizeof(struct hci_comp_blocks_info)) { + ev->num_hndl * sizeof(struct hci_comp_blocks_info)) { BT_DBG("%s bad parameters", hdev->name); return; } BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks, - ev->num_hndl); + ev->num_hndl); for (i = 0; i < ev->num_hndl; i++) { struct hci_comp_blocks_info *info = &ev->handles[i]; @@ -2600,7 +2607,7 @@ static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb) queue_work(hdev->workqueue, &hdev->tx_work); } -static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_mode_change *ev = (void *) skb->data; struct hci_conn *conn; @@ -2614,8 +2621,7 @@ static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) conn->mode = ev->mode; conn->interval = __le16_to_cpu(ev->interval); - if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, - &conn->flags)) { + if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) { if (conn->mode == HCI_CM_ACTIVE) set_bit(HCI_CONN_POWER_SAVE, &conn->flags); else @@ -2629,7 +2635,7 @@ static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_pin_code_req *ev = (void *) skb->data; struct hci_conn *conn; @@ -2650,7 +2656,7 @@ static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags)) hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, - sizeof(ev->bdaddr), &ev->bdaddr); + sizeof(ev->bdaddr), &ev->bdaddr); else if (test_bit(HCI_MGMT, &hdev->dev_flags)) { u8 secure; @@ -2666,7 +2672,7 @@ static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_link_key_req *ev = (void *) skb->data; struct hci_cp_link_key_reply cp; @@ -2683,15 +2689,15 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) key = hci_find_link_key(hdev, &ev->bdaddr); if (!key) { BT_DBG("%s link key not found for %s", hdev->name, - batostr(&ev->bdaddr)); + batostr(&ev->bdaddr)); goto not_found; } BT_DBG("%s found key type %u for %s", hdev->name, key->type, - batostr(&ev->bdaddr)); + batostr(&ev->bdaddr)); if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) && - key->type == HCI_LK_DEBUG_COMBINATION) { + key->type == HCI_LK_DEBUG_COMBINATION) { BT_DBG("%s ignoring debug key", hdev->name); goto not_found; } @@ -2699,15 +2705,16 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (conn) { if (key->type == HCI_LK_UNAUTH_COMBINATION && - conn->auth_type != 0xff && (conn->auth_type & 0x01)) { + conn->auth_type != 0xff && + (conn->auth_type & 0x01)) { BT_DBG("%s ignoring unauthenticated key", hdev->name); goto not_found; } if (key->type == HCI_LK_COMBINATION && key->pin_len 
< 16 && - conn->pending_sec_level == BT_SECURITY_HIGH) { - BT_DBG("%s ignoring key unauthenticated for high security", - hdev->name); + conn->pending_sec_level == BT_SECURITY_HIGH) { + BT_DBG("%s ignoring key unauthenticated for high \ + security", hdev->name); goto not_found; } @@ -2716,7 +2723,7 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) } bacpy(&cp.bdaddr, &ev->bdaddr); - memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE); + memcpy(cp.link_key, key->val, 16); hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); @@ -2729,7 +2736,7 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_link_key_notify *ev = (void *) skb->data; struct hci_conn *conn; @@ -2753,12 +2760,12 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags)) hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key, - ev->key_type, pin_len); + ev->key_type, pin_len); hci_dev_unlock(hdev); } -static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_clock_offset *ev = (void *) skb->data; struct hci_conn *conn; @@ -2781,7 +2788,7 @@ static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_pkt_type_change *ev = (void *) skb->data; struct hci_conn *conn; @@ -2797,7 +2804,7 @@ static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_pscan_rep_mode *ev = (void *) skb->data; struct inquiry_entry *ie; @@ -2815,8 +2822,7 @@ static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct inquiry_data data; int num_rsp = *((__u8 *) skb->data); @@ -2875,8 +2881,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_remote_ext_features_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_remote_ext_features *ev = (void *) skb->data; struct hci_conn *conn; @@ -2924,8 +2929,7 @@ static void hci_remote_ext_features_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_sync_conn_complete_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_sync_conn_complete *ev = (void *) skb->data; struct hci_conn *conn; @@ -2980,20 +2984,19 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void 
hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	BT_DBG("%s", hdev->name);
 }
 
-static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_sniff_subrate *ev = (void *) skb->data;
 
 	BT_DBG("%s status %d", hdev->name, ev->status);
 }
 
-static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
-					     struct sk_buff *skb)
+static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct inquiry_data data;
 	struct extended_inquiry_info *info = (void *) (skb->data + 1);
@@ -3040,51 +3043,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
 	hci_dev_unlock(hdev);
 }
 
-static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
-					 struct sk_buff *skb)
-{
-	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
-	struct hci_conn *conn;
-
-	BT_DBG("%s status %u handle %u", hdev->name, ev->status,
-	       __le16_to_cpu(ev->handle));
-
-	hci_dev_lock(hdev);
-
-	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
-	if (!conn)
-		goto unlock;
-
-	if (!ev->status)
-		conn->sec_level = conn->pending_sec_level;
-
-	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
-
-	if (ev->status && conn->state == BT_CONNECTED) {
-		hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
-		hci_conn_put(conn);
-		goto unlock;
-	}
-
-	if (conn->state == BT_CONFIG) {
-		if (!ev->status)
-			conn->state = BT_CONNECTED;
-
-		hci_proto_connect_cfm(conn, ev->status);
-		hci_conn_put(conn);
-	} else {
-		hci_auth_cfm(conn, ev->status);
-
-		hci_conn_hold(conn);
-		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
-		hci_conn_put(conn);
-	}
-
-unlock:
-	hci_dev_unlock(hdev);
-}
-
-static u8 hci_get_auth_req(struct hci_conn *conn)
+static inline u8 hci_get_auth_req(struct hci_conn *conn)
 {
 	/* If remote requests dedicated bonding follow that lead */
 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
@@ -3103,7 +3062,7 @@ static u8 hci_get_auth_req(struct hci_conn *conn)
 	return conn->auth_type;
 }
 
-static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
+static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 {
 	struct hci_ev_io_capa_request *ev = (void *) skb->data;
 	struct hci_conn *conn;
@@ -3122,7 +3081,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 		goto unlock;
 
 	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
-	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
+			(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
 		struct hci_cp_io_capability_reply cp;
 
 		bacpy(&cp.bdaddr, &ev->bdaddr);
@@ -3133,14 +3092,14 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 		conn->auth_type = hci_get_auth_req(conn);
 		cp.authentication = conn->auth_type;
 
-		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
-		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
+		if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
+				hci_find_remote_oob_data(hdev, &conn->dst))
 			cp.oob_data = 0x01;
 		else
 			cp.oob_data = 0x00;
 
 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
-			     sizeof(cp), &cp);
+							sizeof(cp), &cp);
 	} else {
 		struct hci_cp_io_capability_neg_reply cp;
 
@@ -3148,14 +3107,14 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
 		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
 
 		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
-			     sizeof(cp), &cp);
+							sizeof(cp), &cp);
 	}
 
 unlock:
 	hci_dev_unlock(hdev);
 }
 
-static void 
hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_io_capa_reply *ev = (void *) skb->data; struct hci_conn *conn; @@ -3177,8 +3136,8 @@ static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_user_confirm_request_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_user_confirm_request_evt(struct hci_dev *hdev, + struct sk_buff *skb) { struct hci_ev_user_confirm_req *ev = (void *) skb->data; int loc_mitm, rem_mitm, confirm_hint = 0; @@ -3206,13 +3165,13 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev, if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) { BT_DBG("Rejecting request: remote device can't provide MITM"); hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, - sizeof(ev->bdaddr), &ev->bdaddr); + sizeof(ev->bdaddr), &ev->bdaddr); goto unlock; } /* If no side requires MITM protection; auto-accept */ if ((!loc_mitm || conn->remote_cap == 0x03) && - (!rem_mitm || conn->io_capability == 0x03)) { + (!rem_mitm || conn->io_capability == 0x03)) { /* If we're not the initiators request authorization to * proceed from user space (mgmt_user_confirm with @@ -3224,7 +3183,7 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev, } BT_DBG("Auto-accept of user confirmation with %ums delay", - hdev->auto_accept_delay); + hdev->auto_accept_delay); if (hdev->auto_accept_delay > 0) { int delay = msecs_to_jiffies(hdev->auto_accept_delay); @@ -3233,7 +3192,7 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev, } hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, - sizeof(ev->bdaddr), &ev->bdaddr); + sizeof(ev->bdaddr), &ev->bdaddr); goto unlock; } @@ -3245,8 +3204,8 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_user_passkey_request_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_user_passkey_request_evt(struct hci_dev *hdev, + struct sk_buff *skb) { struct hci_ev_user_passkey_req *ev = (void *) skb->data; @@ -3260,8 +3219,7 @@ static void hci_user_passkey_request_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_simple_pair_complete_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_simple_pair_complete *ev = (void *) skb->data; struct hci_conn *conn; @@ -3289,8 +3247,7 @@ static void hci_simple_pair_complete_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_remote_host_features_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_remote_host_features *ev = (void *) skb->data; struct inquiry_entry *ie; @@ -3306,8 +3263,8 @@ static void hci_remote_host_features_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev, + struct sk_buff *skb) { struct hci_ev_remote_oob_data_request *ev = (void *) skb->data; struct oob_data *data; @@ -3328,20 +3285,20 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer)); hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp), - &cp); + &cp); } else { struct 
hci_cp_remote_oob_data_neg_reply cp; bacpy(&cp.bdaddr, &ev->bdaddr); hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp), - &cp); + &cp); } unlock: hci_dev_unlock(hdev); } -static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_le_conn_complete *ev = (void *) skb->data; struct hci_conn *conn; @@ -3350,19 +3307,6 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_lock(hdev); - if (ev->status) { - conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); - if (!conn) - goto unlock; - - mgmt_connect_failed(hdev, &conn->dst, conn->type, - conn->dst_type, ev->status); - hci_proto_connect_cfm(conn, ev->status); - conn->state = BT_CLOSED; - hci_conn_del(conn); - goto unlock; - } - conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr); if (!conn) { conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr); @@ -3375,6 +3319,15 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) conn->dst_type = ev->bdaddr_type; } + if (ev->status) { + mgmt_connect_failed(hdev, &ev->bdaddr, conn->type, + conn->dst_type, ev->status); + hci_proto_connect_cfm(conn, ev->status); + conn->state = BT_CLOSED; + hci_conn_del(conn); + goto unlock; + } + if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) mgmt_device_connected(hdev, &ev->bdaddr, conn->type, conn->dst_type, 0, NULL, 0, NULL); @@ -3392,7 +3345,8 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_le_adv_report_evt(struct hci_dev *hdev, + struct sk_buff *skb) { u8 num_reports = skb->data[0]; void *ptr = &skb->data[1]; @@ -3413,7 +3367,8 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_le_ltk_request_evt(struct hci_dev *hdev, + struct sk_buff *skb) { struct hci_ev_le_ltk_req *ev = (void *) skb->data; struct hci_cp_le_ltk_reply cp; @@ -3456,7 +3411,7 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_le_meta *le_ev = (void *) skb->data; @@ -3604,10 +3559,6 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) hci_extended_inquiry_result_evt(hdev, skb); break; - case HCI_EV_KEY_REFRESH_COMPLETE: - hci_key_refresh_complete_evt(hdev, skb); - break; - case HCI_EV_IO_CAPA_REQUEST: hci_io_capa_request_evt(hdev, skb); break; diff --git a/trunk/net/bluetooth/hci_sock.c b/trunk/net/bluetooth/hci_sock.c index a7f04de03d79..5914623f426a 100644 --- a/trunk/net/bluetooth/hci_sock.c +++ b/trunk/net/bluetooth/hci_sock.c @@ -24,7 +24,25 @@ /* Bluetooth HCI sockets. */ -#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include #include #include @@ -95,12 +113,11 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) flt = &hci_pi(sk)->filter; if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ? 
- 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), - &flt->type_mask)) + 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask)) continue; if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) { - int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS); + register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS); if (!hci_test_bit(evt, &flt->event_mask)) continue; @@ -223,8 +240,7 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) struct hci_mon_hdr *hdr; /* Create a private copy with headroom */ - skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, - GFP_ATOMIC); + skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC); if (!skb_copy) continue; @@ -479,8 +495,7 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg) } /* Ioctls that require bound socket */ -static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, - unsigned long arg) +static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) { struct hci_dev *hdev = hci_pi(sk)->hdev; @@ -525,8 +540,7 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, } } -static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, - unsigned long arg) +static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; void __user *argp = (void __user *) arg; @@ -587,8 +601,7 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, } } -static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, - int addr_len) +static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sockaddr_hci haddr; struct sock *sk = sock->sk; @@ -677,8 +690,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, return err; } -static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, - int *addr_len, int peer) +static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer) { struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr; struct sock *sk = sock->sk; @@ -699,15 +711,13 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, return 0; } -static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, - struct sk_buff *skb) +static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) { __u32 mask = hci_pi(sk)->cmsg_mask; if (mask & HCI_CMSG_DIR) { int incoming = bt_cb(skb)->incoming; - put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), - &incoming); + put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming); } if (mask & HCI_CMSG_TSTAMP) { @@ -737,7 +747,7 @@ static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, } static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags) + struct msghdr *msg, size_t len, int flags) { int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; @@ -847,9 +857,8 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, u16 ocf = hci_opcode_ocf(opcode); if (((ogf > HCI_SFLT_MAX_OGF) || - !hci_test_bit(ocf & HCI_FLT_OCF_BITS, - &hci_sec_filter.ocf_mask[ogf])) && - !capable(CAP_NET_RAW)) { + !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) && + !capable(CAP_NET_RAW)) { err = -EPERM; goto drop; } @@ -882,8 +891,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, goto done; } -static int hci_sock_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int len) +static 
int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len) { struct hci_ufilter uf = { .opcode = 0 }; struct sock *sk = sock->sk; @@ -965,8 +973,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, return err; } -static int hci_sock_getsockopt(struct socket *sock, int level, int optname, - char __user *optval, int __user *optlen) +static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct hci_ufilter uf; struct sock *sk = sock->sk; diff --git a/trunk/net/bluetooth/hci_sysfs.c b/trunk/net/bluetooth/hci_sysfs.c index a20e61c3653d..937f3187eafa 100644 --- a/trunk/net/bluetooth/hci_sysfs.c +++ b/trunk/net/bluetooth/hci_sysfs.c @@ -1,6 +1,10 @@ /* Bluetooth HCI driver model support. */ +#include +#include +#include #include +#include #include #include @@ -27,30 +31,27 @@ static inline char *link_typetostr(int type) } } -static ssize_t show_link_type(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_conn *conn = to_hci_conn(dev); return sprintf(buf, "%s\n", link_typetostr(conn->type)); } -static ssize_t show_link_address(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_conn *conn = to_hci_conn(dev); return sprintf(buf, "%s\n", batostr(&conn->dst)); } -static ssize_t show_link_features(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_conn *conn = to_hci_conn(dev); return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", - conn->features[0], conn->features[1], - conn->features[2], conn->features[3], - conn->features[4], conn->features[5], - conn->features[6], conn->features[7]); + conn->features[0], conn->features[1], + conn->features[2], conn->features[3], + conn->features[4], conn->features[5], + conn->features[6], conn->features[7]); } #define LINK_ATTR(_name, _mode, _show, _store) \ @@ -184,22 +185,19 @@ static inline char *host_typetostr(int type) } } -static ssize_t show_bus(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_dev *hdev = to_hci_dev(dev); return sprintf(buf, "%s\n", host_bustostr(hdev->bus)); } -static ssize_t show_type(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_dev *hdev = to_hci_dev(dev); return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type)); } -static ssize_t show_name(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_dev *hdev = to_hci_dev(dev); char name[HCI_MAX_NAME_LENGTH + 1]; @@ -212,64 +210,55 @@ static ssize_t show_name(struct device *dev, return sprintf(buf, "%s\n", name); } -static ssize_t show_class(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_dev *hdev = to_hci_dev(dev); - return sprintf(buf, "0x%.2x%.2x%.2x\n", hdev->dev_class[2], - hdev->dev_class[1], hdev->dev_class[0]); + return 
sprintf(buf, "0x%.2x%.2x%.2x\n",
+		hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
 }
 
-static ssize_t show_address(struct device *dev,
-			    struct device_attribute *attr, char *buf)
+static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%s\n", batostr(&hdev->bdaddr));
 }
 
-static ssize_t show_features(struct device *dev,
-			     struct device_attribute *attr, char *buf)
+static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
-		       hdev->features[0], hdev->features[1],
-		       hdev->features[2], hdev->features[3],
-		       hdev->features[4], hdev->features[5],
-		       hdev->features[6], hdev->features[7]);
+				hdev->features[0], hdev->features[1],
+				hdev->features[2], hdev->features[3],
+				hdev->features[4], hdev->features[5],
+				hdev->features[6], hdev->features[7]);
 }
 
-static ssize_t show_manufacturer(struct device *dev,
-				 struct device_attribute *attr, char *buf)
+static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%d\n", hdev->manufacturer);
 }
 
-static ssize_t show_hci_version(struct device *dev,
-				struct device_attribute *attr, char *buf)
+static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%d\n", hdev->hci_ver);
 }
 
-static ssize_t show_hci_revision(struct device *dev,
-				 struct device_attribute *attr, char *buf)
+static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%d\n", hdev->hci_rev);
 }
 
-static ssize_t show_idle_timeout(struct device *dev,
-				 struct device_attribute *attr, char *buf)
+static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%d\n", hdev->idle_timeout);
 }
 
-static ssize_t store_idle_timeout(struct device *dev,
-				  struct device_attribute *attr,
-				  const char *buf, size_t count)
+static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	unsigned int val;
@@ -287,16 +276,13 @@ static ssize_t store_idle_timeout(struct device *dev,
 	return count;
 }
 
-static ssize_t show_sniff_max_interval(struct device *dev,
-				       struct device_attribute *attr, char *buf)
+static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%d\n", hdev->sniff_max_interval);
 }
 
-static ssize_t store_sniff_max_interval(struct device *dev,
-					struct device_attribute *attr,
-					const char *buf, size_t count)
+static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	u16 val;
@@ -314,16 +300,13 @@ static ssize_t store_sniff_max_interval(struct device *dev,
 	return count;
 }
 
-static ssize_t show_sniff_min_interval(struct device *dev,
-				       struct device_attribute *attr, char *buf)
+static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct hci_dev *hdev = to_hci_dev(dev);
 	return sprintf(buf, "%d\n", hdev->sniff_min_interval);
 }
 
-static ssize_t 
store_sniff_min_interval(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hci_dev *hdev = to_hci_dev(dev); u16 val; @@ -352,11 +335,11 @@ static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL); static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL); static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR, - show_idle_timeout, store_idle_timeout); + show_idle_timeout, store_idle_timeout); static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR, - show_sniff_max_interval, store_sniff_max_interval); + show_sniff_max_interval, store_sniff_max_interval); static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR, - show_sniff_min_interval, store_sniff_min_interval); + show_sniff_min_interval, store_sniff_min_interval); static struct attribute *bt_host_attrs[] = { &dev_attr_bus.attr, @@ -472,8 +455,8 @@ static void print_bt_uuid(struct seq_file *f, u8 *uuid) memcpy(&data5, &uuid[14], 2); seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n", - ntohl(data0), ntohs(data1), ntohs(data2), ntohs(data3), - ntohl(data4), ntohs(data5)); + ntohl(data0), ntohs(data1), ntohs(data2), + ntohs(data3), ntohl(data4), ntohs(data5)); } static int uuids_show(struct seq_file *f, void *p) @@ -530,7 +513,7 @@ static int auto_accept_delay_get(void *data, u64 *val) } DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, - auto_accept_delay_set, "%llu\n"); + auto_accept_delay_set, "%llu\n"); void hci_init_sysfs(struct hci_dev *hdev) { @@ -564,15 +547,15 @@ int hci_add_sysfs(struct hci_dev *hdev) return 0; debugfs_create_file("inquiry_cache", 0444, hdev->debugfs, - hdev, &inquiry_cache_fops); + hdev, &inquiry_cache_fops); debugfs_create_file("blacklist", 0444, hdev->debugfs, - hdev, &blacklist_fops); + hdev, &blacklist_fops); debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops); debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev, - &auto_accept_delay_fops); + &auto_accept_delay_fops); return 0; } diff --git a/trunk/net/bluetooth/hidp/core.c b/trunk/net/bluetooth/hidp/core.c index ccd985da6518..2c20d765b394 100644 --- a/trunk/net/bluetooth/hidp/core.c +++ b/trunk/net/bluetooth/hidp/core.c @@ -21,8 +21,27 @@ */ #include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include #include +#include + +#include +#include #include #include @@ -225,8 +244,7 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb) } static int __hidp_send_ctrl_message(struct hidp_session *session, - unsigned char hdr, unsigned char *data, - int size) + unsigned char hdr, unsigned char *data, int size) { struct sk_buff *skb; @@ -250,7 +268,7 @@ static int __hidp_send_ctrl_message(struct hidp_session *session, return 0; } -static int hidp_send_ctrl_message(struct hidp_session *session, +static inline int hidp_send_ctrl_message(struct hidp_session *session, unsigned char hdr, unsigned char *data, int size) { int err; @@ -453,7 +471,7 @@ static void hidp_set_timer(struct hidp_session *session) mod_timer(&session->timer, jiffies + HZ * session->idle_to); } -static void hidp_del_timer(struct hidp_session *session) +static inline void hidp_del_timer(struct hidp_session *session) { if (session->idle_to > 0) del_timer(&session->timer); diff --git a/trunk/net/bluetooth/hidp/sock.c b/trunk/net/bluetooth/hidp/sock.c 
index 18b3f6892a36..73a32d705c1f 100644 --- a/trunk/net/bluetooth/hidp/sock.c +++ b/trunk/net/bluetooth/hidp/sock.c @@ -20,8 +20,22 @@ SOFTWARE IS DISCLAIMED. */ -#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include #include "hidp.h" diff --git a/trunk/net/bluetooth/l2cap_core.c b/trunk/net/bluetooth/l2cap_core.c index 4ca88247b7c2..24f144b72a96 100644 --- a/trunk/net/bluetooth/l2cap_core.c +++ b/trunk/net/bluetooth/l2cap_core.c @@ -30,14 +30,32 @@ #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include #include +#include + +#include #include #include #include #include -#include bool disable_ertm; @@ -55,9 +73,6 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data); static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err); -static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, - struct sk_buff_head *skbs, u8 event); - /* ---- L2CAP channels ---- */ static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) @@ -181,7 +196,7 @@ static void __l2cap_state_change(struct l2cap_chan *chan, int state) state_to_string(state)); chan->state = state; - chan->ops->state_change(chan, state); + chan->ops->state_change(chan->data, state); } static void l2cap_state_change(struct l2cap_chan *chan, int state) @@ -209,37 +224,6 @@ static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err) release_sock(sk); } -static void __set_retrans_timer(struct l2cap_chan *chan) -{ - if (!delayed_work_pending(&chan->monitor_timer) && - chan->retrans_timeout) { - l2cap_set_timer(chan, &chan->retrans_timer, - msecs_to_jiffies(chan->retrans_timeout)); - } -} - -static void __set_monitor_timer(struct l2cap_chan *chan) -{ - __clear_retrans_timer(chan); - if (chan->monitor_timeout) { - l2cap_set_timer(chan, &chan->monitor_timer, - msecs_to_jiffies(chan->monitor_timeout)); - } -} - -static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head, - u16 seq) -{ - struct sk_buff *skb; - - skb_queue_walk(head, skb) { - if (bt_cb(skb)->control.txseq == seq) - return skb; - } - - return NULL; -} - /* ---- L2CAP sequence number lists ---- */ /* For ERTM, ordered lists of sequence numbers must be tracked for @@ -382,7 +366,7 @@ static void l2cap_chan_timeout(struct work_struct *work) l2cap_chan_unlock(chan); - chan->ops->close(chan); + chan->ops->close(chan->data); mutex_unlock(&conn->chan_lock); l2cap_chan_put(chan); @@ -408,9 +392,6 @@ struct l2cap_chan *l2cap_chan_create(void) atomic_set(&chan->refcnt, 1); - /* This flag is cleared in l2cap_chan_ready() */ - set_bit(CONF_NOT_COMPLETE, &chan->conf_state); - BT_DBG("chan %p", chan); return chan; @@ -449,7 +430,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) case L2CAP_CHAN_CONN_ORIENTED: if (conn->hcon->type == LE_LINK) { /* LE connection */ - chan->omtu = L2CAP_DEFAULT_MTU; + chan->omtu = L2CAP_LE_DEFAULT_MTU; chan->scid = L2CAP_CID_LE_DATA; chan->dcid = L2CAP_CID_LE_DATA; } else { @@ -466,13 +447,6 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) chan->omtu = L2CAP_DEFAULT_MTU; break; - case L2CAP_CHAN_CONN_FIX_A2MP: - chan->scid = L2CAP_CID_A2MP; - chan->dcid = L2CAP_CID_A2MP; - chan->omtu = L2CAP_A2MP_DEFAULT_MTU; - chan->imtu = L2CAP_A2MP_DEFAULT_MTU; - break; - 
default: /* Raw socket can send/recv signalling messages only */ chan->scid = L2CAP_CID_SIGNALING; @@ -492,16 +466,18 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) list_add(&chan->list, &conn->chan_l); } -void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) +static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) { mutex_lock(&conn->chan_lock); __l2cap_chan_add(conn, chan); mutex_unlock(&conn->chan_lock); } -void l2cap_chan_del(struct l2cap_chan *chan, int err) +static void l2cap_chan_del(struct l2cap_chan *chan, int err) { + struct sock *sk = chan->sk; struct l2cap_conn *conn = chan->conn; + struct sock *parent = bt_sk(sk)->parent; __clear_chan_timer(chan); @@ -514,22 +490,34 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err) l2cap_chan_put(chan); chan->conn = NULL; - - if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP) - hci_conn_put(conn->hcon); + hci_conn_put(conn->hcon); } - if (chan->ops->teardown) - chan->ops->teardown(chan, err); + lock_sock(sk); + + __l2cap_state_change(chan, BT_CLOSED); + sock_set_flag(sk, SOCK_ZAPPED); + + if (err) + __l2cap_chan_set_err(chan, err); + + if (parent) { + bt_accept_unlink(sk); + parent->sk_data_ready(parent, 0); + } else + sk->sk_state_change(sk); + + release_sock(sk); - if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state)) + if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) && + test_bit(CONF_INPUT_DONE, &chan->conf_state))) return; - switch(chan->mode) { - case L2CAP_MODE_BASIC: - break; + skb_queue_purge(&chan->tx_q); + + if (chan->mode == L2CAP_MODE_ERTM) { + struct srej_list *l, *tmp; - case L2CAP_MODE_ERTM: __clear_retrans_timer(chan); __clear_monitor_timer(chan); __clear_ack_timer(chan); @@ -538,15 +526,30 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err) l2cap_seq_list_free(&chan->srej_list); l2cap_seq_list_free(&chan->retrans_list); + list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { + list_del(&l->list); + kfree(l); + } + } +} - /* fall through */ +static void l2cap_chan_cleanup_listen(struct sock *parent) +{ + struct sock *sk; - case L2CAP_MODE_STREAMING: - skb_queue_purge(&chan->tx_q); - break; - } + BT_DBG("parent %p", parent); + + /* Close not yet accepted channels */ + while ((sk = bt_accept_dequeue(parent, NULL))) { + struct l2cap_chan *chan = l2cap_pi(sk)->chan; - return; + l2cap_chan_lock(chan); + __clear_chan_timer(chan); + l2cap_chan_close(chan, ECONNRESET); + l2cap_chan_unlock(chan); + + chan->ops->close(chan->data); + } } void l2cap_chan_close(struct l2cap_chan *chan, int reason) @@ -559,8 +562,12 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason) switch (chan->state) { case BT_LISTEN: - if (chan->ops->teardown) - chan->ops->teardown(chan, 0); + lock_sock(sk); + l2cap_chan_cleanup_listen(sk); + + __l2cap_state_change(chan, BT_CLOSED); + sock_set_flag(sk, SOCK_ZAPPED); + release_sock(sk); break; case BT_CONNECTED: @@ -588,7 +595,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason) rsp.scid = cpu_to_le16(chan->dcid); rsp.dcid = cpu_to_le16(chan->scid); rsp.result = cpu_to_le16(result); - rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); + rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); } @@ -602,8 +609,9 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason) break; default: - if (chan->ops->teardown) - chan->ops->teardown(chan, 0); + lock_sock(sk); + sock_set_flag(sk, SOCK_ZAPPED); + release_sock(sk); break; } } @@ -619,7 +627,7 @@ static 
inline u8 l2cap_get_auth_type(struct l2cap_chan *chan) default: return HCI_AT_NO_BONDING; } - } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) { + } else if (chan->psm == cpu_to_le16(0x0001)) { if (chan->sec_level == BT_SECURITY_LOW) chan->sec_level = BT_SECURITY_SDP; @@ -765,11 +773,9 @@ static inline void __unpack_control(struct l2cap_chan *chan, if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { __unpack_extended_control(get_unaligned_le32(skb->data), &bt_cb(skb)->control); - skb_pull(skb, L2CAP_EXT_CTRL_SIZE); } else { __unpack_enhanced_control(get_unaligned_le16(skb->data), &bt_cb(skb)->control); - skb_pull(skb, L2CAP_ENH_CTRL_SIZE); } } @@ -824,102 +830,66 @@ static inline void __pack_control(struct l2cap_chan *chan, } } -static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan) -{ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - return L2CAP_EXT_HDR_SIZE; - else - return L2CAP_ENH_HDR_SIZE; -} - -static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan, - u32 control) +static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control) { struct sk_buff *skb; struct l2cap_hdr *lh; - int hlen = __ertm_hdr_size(chan); + struct l2cap_conn *conn = chan->conn; + int count, hlen; + + if (chan->state != BT_CONNECTED) + return; + + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + hlen = L2CAP_EXT_HDR_SIZE; + else + hlen = L2CAP_ENH_HDR_SIZE; if (chan->fcs == L2CAP_FCS_CRC16) hlen += L2CAP_FCS_SIZE; - skb = bt_skb_alloc(hlen, GFP_KERNEL); + BT_DBG("chan %p, control 0x%8.8x", chan, control); + + count = min_t(unsigned int, conn->mtu, hlen); + + control |= __set_sframe(chan); + + if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) + control |= __set_ctrl_final(chan); + if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state)) + control |= __set_ctrl_poll(chan); + + skb = bt_skb_alloc(count, GFP_ATOMIC); if (!skb) - return ERR_PTR(-ENOMEM); + return; lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE)); - else - put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE)); + __put_control(chan, control, skb_put(skb, __ctrl_size(chan))); if (chan->fcs == L2CAP_FCS_CRC16) { - u16 fcs = crc16(0, (u8 *)skb->data, skb->len); + u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE); put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); } skb->priority = HCI_PRIO_MAX; - return skb; + l2cap_do_send(chan, skb); } -static void l2cap_send_sframe(struct l2cap_chan *chan, - struct l2cap_ctrl *control) +static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control) { - struct sk_buff *skb; - u32 control_field; - - BT_DBG("chan %p, control %p", chan, control); - - if (!control->sframe) - return; - - if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) && - !control->poll) - control->final = 1; - - if (control->super == L2CAP_SUPER_RR) - clear_bit(CONN_RNR_SENT, &chan->conn_state); - else if (control->super == L2CAP_SUPER_RNR) + if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { + control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); set_bit(CONN_RNR_SENT, &chan->conn_state); + } else + control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); - if (control->super != L2CAP_SUPER_SREJ) { - chan->last_acked_seq = control->reqseq; - __clear_ack_timer(chan); - } - - BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq, - control->final, control->poll, 
control->super); - - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - control_field = __pack_extended_control(control); - else - control_field = __pack_enhanced_control(control); - - skb = l2cap_create_sframe_pdu(chan, control_field); - if (!IS_ERR(skb)) - l2cap_do_send(chan, skb); -} - -static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll) -{ - struct l2cap_ctrl control; - - BT_DBG("chan %p, poll %d", chan, poll); - - memset(&control, 0, sizeof(control)); - control.sframe = 1; - control.poll = poll; - - if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) - control.super = L2CAP_SUPER_RNR; - else - control.super = L2CAP_SUPER_RR; + control |= __set_reqseq(chan, chan->buffer_seq); - control.reqseq = chan->buffer_seq; - l2cap_send_sframe(chan, &control); + l2cap_send_sframe(chan, control); } static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) @@ -944,13 +914,25 @@ static void l2cap_send_conn_req(struct l2cap_chan *chan) static void l2cap_chan_ready(struct l2cap_chan *chan) { - /* This clears all conf flags, including CONF_NOT_COMPLETE */ + struct sock *sk = chan->sk; + struct sock *parent; + + lock_sock(sk); + + parent = bt_sk(sk)->parent; + + BT_DBG("sk %p, parent %p", sk, parent); + chan->conf_state = 0; __clear_chan_timer(chan); - chan->state = BT_CONNECTED; + __l2cap_state_change(chan, BT_CONNECTED); + sk->sk_state_change(sk); + + if (parent) + parent->sk_data_ready(parent, 0); - chan->ops->ready(chan); + release_sock(sk); } static void l2cap_do_start(struct l2cap_chan *chan) @@ -971,7 +953,7 @@ static void l2cap_do_start(struct l2cap_chan *chan) l2cap_send_conn_req(chan); } else { struct l2cap_info_req req; - req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK); + req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; conn->info_ident = l2cap_get_ident(conn); @@ -1013,11 +995,6 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c __clear_ack_timer(chan); } - if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) { - __l2cap_state_change(chan, BT_DISCONN); - return; - } - req.dcid = cpu_to_le16(chan->dcid); req.scid = cpu_to_le16(chan->scid); l2cap_send_cmd(conn, l2cap_get_ident(conn), @@ -1076,20 +1053,20 @@ static void l2cap_conn_start(struct l2cap_conn *conn) if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { struct sock *parent = bt_sk(sk)->parent; - rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND); - rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND); + rsp.result = cpu_to_le16(L2CAP_CR_PEND); + rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND); if (parent) parent->sk_data_ready(parent, 0); } else { __l2cap_state_change(chan, BT_CONFIG); - rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS); - rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); + rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); + rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); } release_sock(sk); } else { - rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND); - rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND); + rsp.result = cpu_to_le16(L2CAP_CR_PEND); + rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND); } l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, @@ -1173,7 +1150,13 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) lock_sock(parent); - chan = pchan->ops->new_connection(pchan); + /* Check for backlog size */ + if (sk_acceptq_is_full(parent)) { + BT_DBG("backlog full %d", parent->sk_ack_backlog); + goto clean; + } + + chan = pchan->ops->new_connection(pchan->data); if (!chan) goto clean; @@ -1188,7 
+1171,10 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) l2cap_chan_add(conn, chan); - l2cap_chan_ready(chan); + __set_chan_timer(chan, sk->sk_sndtimeo); + + __l2cap_state_change(chan, BT_CONNECTED); + parent->sk_data_ready(parent, 0); clean: release_sock(parent); @@ -1212,11 +1198,6 @@ static void l2cap_conn_ready(struct l2cap_conn *conn) l2cap_chan_lock(chan); - if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) { - l2cap_chan_unlock(chan); - continue; - } - if (conn->hcon->type == LE_LINK) { if (smp_conn_security(conn, chan->sec_level)) l2cap_chan_ready(chan); @@ -1289,7 +1270,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) l2cap_chan_unlock(chan); - chan->ops->close(chan); + chan->ops->close(chan->data); l2cap_chan_put(chan); } @@ -1314,12 +1295,7 @@ static void security_timeout(struct work_struct *work) struct l2cap_conn *conn = container_of(work, struct l2cap_conn, security_timer.work); - BT_DBG("conn %p", conn); - - if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) { - smp_chan_destroy(conn); - l2cap_conn_del(conn->hcon, ETIMEDOUT); - } + l2cap_conn_del(conn->hcon, ETIMEDOUT); } static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) @@ -1463,17 +1439,21 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, goto done; } - switch (chan->state) { + lock_sock(sk); + + switch (sk->sk_state) { case BT_CONNECT: case BT_CONNECT2: case BT_CONFIG: /* Already connecting */ err = 0; + release_sock(sk); goto done; case BT_CONNECTED: /* Already connected */ err = -EISCONN; + release_sock(sk); goto done; case BT_OPEN: @@ -1483,12 +1463,13 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, default: err = -EBADFD; + release_sock(sk); goto done; } /* Set destination address and psm */ - lock_sock(sk); bacpy(&bt_sk(sk)->dst, dst); + release_sock(sk); chan->psm = psm; @@ -1590,20 +1571,23 @@ int __l2cap_wait_ack(struct sock *sk) static void l2cap_monitor_timeout(struct work_struct *work) { struct l2cap_chan *chan = container_of(work, struct l2cap_chan, - monitor_timer.work); + monitor_timer.work); BT_DBG("chan %p", chan); l2cap_chan_lock(chan); - if (!chan->conn) { + if (chan->retry_count >= chan->remote_max_tx) { + l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); l2cap_chan_unlock(chan); l2cap_chan_put(chan); return; } - l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO); + chan->retry_count++; + __set_monitor_timer(chan); + l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); l2cap_chan_unlock(chan); l2cap_chan_put(chan); } @@ -1611,293 +1595,234 @@ static void l2cap_monitor_timeout(struct work_struct *work) static void l2cap_retrans_timeout(struct work_struct *work) { struct l2cap_chan *chan = container_of(work, struct l2cap_chan, - retrans_timer.work); + retrans_timer.work); BT_DBG("chan %p", chan); l2cap_chan_lock(chan); - if (!chan->conn) { - l2cap_chan_unlock(chan); - l2cap_chan_put(chan); - return; - } + chan->retry_count = 1; + __set_monitor_timer(chan); + + set_bit(CONN_WAIT_F, &chan->conn_state); + + l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); - l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO); l2cap_chan_unlock(chan); l2cap_chan_put(chan); } -static void l2cap_streaming_send(struct l2cap_chan *chan, - struct sk_buff_head *skbs) +static void l2cap_drop_acked_frames(struct l2cap_chan *chan) { struct sk_buff *skb; - struct l2cap_ctrl *control; - - BT_DBG("chan %p, skbs %p", chan, skbs); - skb_queue_splice_tail_init(skbs, &chan->tx_q); - - while (!skb_queue_empty(&chan->tx_q)) { + while ((skb = 
skb_peek(&chan->tx_q)) && + chan->unacked_frames) { + if (bt_cb(skb)->control.txseq == chan->expected_ack_seq) + break; skb = skb_dequeue(&chan->tx_q); + kfree_skb(skb); + + chan->unacked_frames--; + } - bt_cb(skb)->control.retries = 1; - control = &bt_cb(skb)->control; + if (!chan->unacked_frames) + __clear_retrans_timer(chan); +} - control->reqseq = 0; - control->txseq = chan->next_tx_seq; +static void l2cap_streaming_send(struct l2cap_chan *chan) +{ + struct sk_buff *skb; + u32 control; + u16 fcs; - __pack_control(chan, control, skb); + while ((skb = skb_dequeue(&chan->tx_q))) { + control = __get_control(chan, skb->data + L2CAP_HDR_SIZE); + control |= __set_txseq(chan, chan->next_tx_seq); + control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar); + __put_control(chan, control, skb->data + L2CAP_HDR_SIZE); if (chan->fcs == L2CAP_FCS_CRC16) { - u16 fcs = crc16(0, (u8 *) skb->data, skb->len); - put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); + fcs = crc16(0, (u8 *)skb->data, + skb->len - L2CAP_FCS_SIZE); + put_unaligned_le16(fcs, + skb->data + skb->len - L2CAP_FCS_SIZE); } l2cap_do_send(chan, skb); - BT_DBG("Sent txseq %d", (int)control->txseq); - chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); - chan->frames_sent++; } } -static int l2cap_ertm_send(struct l2cap_chan *chan) +static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq) { struct sk_buff *skb, *tx_skb; - struct l2cap_ctrl *control; - int sent = 0; - - BT_DBG("chan %p", chan); - - if (chan->state != BT_CONNECTED) - return -ENOTCONN; - - if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) - return 0; - - while (chan->tx_send_head && - chan->unacked_frames < chan->remote_tx_win && - chan->tx_state == L2CAP_TX_STATE_XMIT) { - - skb = chan->tx_send_head; - - bt_cb(skb)->control.retries = 1; - control = &bt_cb(skb)->control; + u16 fcs; + u32 control; - if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) - control->final = 1; + skb = skb_peek(&chan->tx_q); + if (!skb) + return; - control->reqseq = chan->buffer_seq; - chan->last_acked_seq = chan->buffer_seq; - control->txseq = chan->next_tx_seq; + while (bt_cb(skb)->control.txseq != tx_seq) { + if (skb_queue_is_last(&chan->tx_q, skb)) + return; - __pack_control(chan, control, skb); + skb = skb_queue_next(&chan->tx_q, skb); + } - if (chan->fcs == L2CAP_FCS_CRC16) { - u16 fcs = crc16(0, (u8 *) skb->data, skb->len); - put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); - } + if (bt_cb(skb)->control.retries == chan->remote_max_tx && + chan->remote_max_tx) { + l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); + return; + } - /* Clone after data has been modified. Data is assumed to be - read-only (for locking purposes) on cloned sk_buffs. 
- */ - tx_skb = skb_clone(skb, GFP_KERNEL); + tx_skb = skb_clone(skb, GFP_ATOMIC); + bt_cb(skb)->control.retries++; - if (!tx_skb) - break; + control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); + control &= __get_sar_mask(chan); - __set_retrans_timer(chan); + if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) + control |= __set_ctrl_final(chan); - chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); - chan->unacked_frames++; - chan->frames_sent++; - sent++; + control |= __set_reqseq(chan, chan->buffer_seq); + control |= __set_txseq(chan, tx_seq); - if (skb_queue_is_last(&chan->tx_q, skb)) - chan->tx_send_head = NULL; - else - chan->tx_send_head = skb_queue_next(&chan->tx_q, skb); + __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); - l2cap_do_send(chan, tx_skb); - BT_DBG("Sent txseq %d", (int)control->txseq); + if (chan->fcs == L2CAP_FCS_CRC16) { + fcs = crc16(0, (u8 *)tx_skb->data, + tx_skb->len - L2CAP_FCS_SIZE); + put_unaligned_le16(fcs, + tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE); } - BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent, - (int) chan->unacked_frames, skb_queue_len(&chan->tx_q)); - - return sent; + l2cap_do_send(chan, tx_skb); } -static void l2cap_ertm_resend(struct l2cap_chan *chan) +static int l2cap_ertm_send(struct l2cap_chan *chan) { - struct l2cap_ctrl control; - struct sk_buff *skb; - struct sk_buff *tx_skb; - u16 seq; + struct sk_buff *skb, *tx_skb; + u16 fcs; + u32 control; + int nsent = 0; - BT_DBG("chan %p", chan); + if (chan->state != BT_CONNECTED) + return -ENOTCONN; if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) - return; + return 0; - while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) { - seq = l2cap_seq_list_pop(&chan->retrans_list); + while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) { - skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq); - if (!skb) { - BT_DBG("Error: Can't retransmit seq %d, frame missing", - seq); - continue; + if (bt_cb(skb)->control.retries == chan->remote_max_tx && + chan->remote_max_tx) { + l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); + break; } + tx_skb = skb_clone(skb, GFP_ATOMIC); + bt_cb(skb)->control.retries++; - control = bt_cb(skb)->control; - if (chan->max_tx != 0 && - bt_cb(skb)->control.retries > chan->max_tx) { - BT_DBG("Retry limit exceeded (%d)", chan->max_tx); - l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); - l2cap_seq_list_clear(&chan->retrans_list); - break; - } + control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); + control &= __get_sar_mask(chan); - control.reqseq = chan->buffer_seq; if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) - control.final = 1; - else - control.final = 0; - - if (skb_cloned(skb)) { - /* Cloned sk_buffs are read-only, so we need a - * writeable copy - */ - tx_skb = skb_copy(skb, GFP_ATOMIC); - } else { - tx_skb = skb_clone(skb, GFP_ATOMIC); - } + control |= __set_ctrl_final(chan); - if (!tx_skb) { - l2cap_seq_list_clear(&chan->retrans_list); - break; - } + control |= __set_reqseq(chan, chan->buffer_seq); + control |= __set_txseq(chan, chan->next_tx_seq); + control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar); - /* Update skb contents */ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { - put_unaligned_le32(__pack_extended_control(&control), - tx_skb->data + L2CAP_HDR_SIZE); - } else { - put_unaligned_le16(__pack_enhanced_control(&control), - tx_skb->data + L2CAP_HDR_SIZE); - } + __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); if (chan->fcs == L2CAP_FCS_CRC16) { - u16 fcs = 
crc16(0, (u8 *) tx_skb->data, tx_skb->len); - put_unaligned_le16(fcs, skb_put(tx_skb, - L2CAP_FCS_SIZE)); + fcs = crc16(0, (u8 *)skb->data, + tx_skb->len - L2CAP_FCS_SIZE); + put_unaligned_le16(fcs, skb->data + + tx_skb->len - L2CAP_FCS_SIZE); } l2cap_do_send(chan, tx_skb); - BT_DBG("Resent txseq %d", control.txseq); + __set_retrans_timer(chan); + + bt_cb(skb)->control.txseq = chan->next_tx_seq; + + chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); + + if (bt_cb(skb)->control.retries == 1) { + chan->unacked_frames++; + + if (!nsent++) + __clear_ack_timer(chan); + } + + chan->frames_sent++; - chan->last_acked_seq = chan->buffer_seq; + if (skb_queue_is_last(&chan->tx_q, skb)) + chan->tx_send_head = NULL; + else + chan->tx_send_head = skb_queue_next(&chan->tx_q, skb); } + + return nsent; } -static void l2cap_retransmit(struct l2cap_chan *chan, - struct l2cap_ctrl *control) +static int l2cap_retransmit_frames(struct l2cap_chan *chan) { - BT_DBG("chan %p, control %p", chan, control); + int ret; - l2cap_seq_list_append(&chan->retrans_list, control->reqseq); - l2cap_ertm_resend(chan); + if (!skb_queue_empty(&chan->tx_q)) + chan->tx_send_head = chan->tx_q.next; + + chan->next_tx_seq = chan->expected_ack_seq; + ret = l2cap_ertm_send(chan); + return ret; } -static void l2cap_retransmit_all(struct l2cap_chan *chan, - struct l2cap_ctrl *control) +static void __l2cap_send_ack(struct l2cap_chan *chan) { - struct sk_buff *skb; - - BT_DBG("chan %p, control %p", chan, control); - - if (control->poll) - set_bit(CONN_SEND_FBIT, &chan->conn_state); + u32 control = 0; - l2cap_seq_list_clear(&chan->retrans_list); + control |= __set_reqseq(chan, chan->buffer_seq); - if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) + if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { + control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); + set_bit(CONN_RNR_SENT, &chan->conn_state); + l2cap_send_sframe(chan, control); return; + } - if (chan->unacked_frames) { - skb_queue_walk(&chan->tx_q, skb) { - if (bt_cb(skb)->control.txseq == control->reqseq || - skb == chan->tx_send_head) - break; - } - - skb_queue_walk_from(&chan->tx_q, skb) { - if (skb == chan->tx_send_head) - break; - - l2cap_seq_list_append(&chan->retrans_list, - bt_cb(skb)->control.txseq); - } + if (l2cap_ertm_send(chan) > 0) + return; - l2cap_ertm_resend(chan); - } + control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); + l2cap_send_sframe(chan, control); } static void l2cap_send_ack(struct l2cap_chan *chan) { - struct l2cap_ctrl control; - u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq, - chan->last_acked_seq); - int threshold; - - BT_DBG("chan %p last_acked_seq %d buffer_seq %d", - chan, chan->last_acked_seq, chan->buffer_seq); + __clear_ack_timer(chan); + __l2cap_send_ack(chan); +} - memset(&control, 0, sizeof(control)); - control.sframe = 1; +static void l2cap_send_srejtail(struct l2cap_chan *chan) +{ + struct srej_list *tail; + u32 control; - if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && - chan->rx_state == L2CAP_RX_STATE_RECV) { - __clear_ack_timer(chan); - control.super = L2CAP_SUPER_RNR; - control.reqseq = chan->buffer_seq; - l2cap_send_sframe(chan, &control); - } else { - if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) { - l2cap_ertm_send(chan); - /* If any i-frames were sent, they included an ack */ - if (chan->buffer_seq == chan->last_acked_seq) - frames_to_ack = 0; - } + control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); + control |= __set_ctrl_final(chan); - /* Ack now if the tx window is 3/4ths full. 
- * Calculate without mul or div - */ - threshold = chan->tx_win; - threshold += threshold << 1; - threshold >>= 2; - - BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack, - threshold); - - if (frames_to_ack >= threshold) { - __clear_ack_timer(chan); - control.super = L2CAP_SUPER_RR; - control.reqseq = chan->buffer_seq; - l2cap_send_sframe(chan, &control); - frames_to_ack = 0; - } + tail = list_entry((&chan->srej_l)->prev, struct srej_list, list); + control |= __set_reqseq(chan, tail->tx_seq); - if (frames_to_ack) - __set_ack_timer(chan); - } + l2cap_send_sframe(chan, control); } static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, @@ -2026,7 +1951,10 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, if (!conn) return ERR_PTR(-ENOTCONN); - hlen = __ertm_hdr_size(chan); + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + hlen = L2CAP_EXT_HDR_SIZE; + else + hlen = L2CAP_ENH_HDR_SIZE; if (sdulen) hlen += L2CAP_SDULEN_SIZE; @@ -2046,11 +1974,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, lh->cid = cpu_to_le16(chan->dcid); lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); - /* Control header is populated later */ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE)); - else - put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE)); + __put_control(chan, 0, skb_put(skb, __ctrl_size(chan))); if (sdulen) put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); @@ -2061,8 +1985,10 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, return ERR_PTR(err); } - bt_cb(skb)->control.fcs = chan->fcs; - bt_cb(skb)->control.retries = 0; + if (chan->fcs == L2CAP_FCS_CRC16) + put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE)); + + bt_cb(skb)->control.retries = 0; return skb; } @@ -2073,6 +1999,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan, struct sk_buff *skb; u16 sdu_len; size_t pdu_len; + int err = 0; u8 sar; BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len); @@ -2088,10 +2015,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan, pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD); /* Adjust for largest possible L2CAP overhead. */ - if (chan->fcs) - pdu_len -= L2CAP_FCS_SIZE; - - pdu_len -= __ertm_hdr_size(chan); + pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE; /* Remote device may have requested smaller PDUs */ pdu_len = min_t(size_t, pdu_len, chan->remote_mps); @@ -2131,7 +2055,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan, } } - return 0; + return err; } int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, @@ -2193,12 +2117,17 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, if (err) break; + if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL) + chan->tx_send_head = seg_queue.next; + skb_queue_splice_tail_init(&seg_queue, &chan->tx_q); + if (chan->mode == L2CAP_MODE_ERTM) - l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST); + err = l2cap_ertm_send(chan); else - l2cap_streaming_send(chan, &seg_queue); + l2cap_streaming_send(chan); - err = len; + if (err >= 0) + err = len; /* If the skbs were not queued for sending, they'll still be in * seg_queue and need to be purged. 
@@ -2214,296 +2143,6 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, return err; } -static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq) -{ - struct l2cap_ctrl control; - u16 seq; - - BT_DBG("chan %p, txseq %d", chan, txseq); - - memset(&control, 0, sizeof(control)); - control.sframe = 1; - control.super = L2CAP_SUPER_SREJ; - - for (seq = chan->expected_tx_seq; seq != txseq; - seq = __next_seq(chan, seq)) { - if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) { - control.reqseq = seq; - l2cap_send_sframe(chan, &control); - l2cap_seq_list_append(&chan->srej_list, seq); - } - } - - chan->expected_tx_seq = __next_seq(chan, txseq); -} - -static void l2cap_send_srej_tail(struct l2cap_chan *chan) -{ - struct l2cap_ctrl control; - - BT_DBG("chan %p", chan); - - if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR) - return; - - memset(&control, 0, sizeof(control)); - control.sframe = 1; - control.super = L2CAP_SUPER_SREJ; - control.reqseq = chan->srej_list.tail; - l2cap_send_sframe(chan, &control); -} - -static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq) -{ - struct l2cap_ctrl control; - u16 initial_head; - u16 seq; - - BT_DBG("chan %p, txseq %d", chan, txseq); - - memset(&control, 0, sizeof(control)); - control.sframe = 1; - control.super = L2CAP_SUPER_SREJ; - - /* Capture initial list head to allow only one pass through the list. */ - initial_head = chan->srej_list.head; - - do { - seq = l2cap_seq_list_pop(&chan->srej_list); - if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR) - break; - - control.reqseq = seq; - l2cap_send_sframe(chan, &control); - l2cap_seq_list_append(&chan->srej_list, seq); - } while (chan->srej_list.head != initial_head); -} - -static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq) -{ - struct sk_buff *acked_skb; - u16 ackseq; - - BT_DBG("chan %p, reqseq %d", chan, reqseq); - - if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq) - return; - - BT_DBG("expected_ack_seq %d, unacked_frames %d", - chan->expected_ack_seq, chan->unacked_frames); - - for (ackseq = chan->expected_ack_seq; ackseq != reqseq; - ackseq = __next_seq(chan, ackseq)) { - - acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq); - if (acked_skb) { - skb_unlink(acked_skb, &chan->tx_q); - kfree_skb(acked_skb); - chan->unacked_frames--; - } - } - - chan->expected_ack_seq = reqseq; - - if (chan->unacked_frames == 0) - __clear_retrans_timer(chan); - - BT_DBG("unacked_frames %d", (int) chan->unacked_frames); -} - -static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan) -{ - BT_DBG("chan %p", chan); - - chan->expected_tx_seq = chan->buffer_seq; - l2cap_seq_list_clear(&chan->srej_list); - skb_queue_purge(&chan->srej_q); - chan->rx_state = L2CAP_RX_STATE_RECV; -} - -static void l2cap_tx_state_xmit(struct l2cap_chan *chan, - struct l2cap_ctrl *control, - struct sk_buff_head *skbs, u8 event) -{ - BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, - event); - - switch (event) { - case L2CAP_EV_DATA_REQUEST: - if (chan->tx_send_head == NULL) - chan->tx_send_head = skb_peek(skbs); - - skb_queue_splice_tail_init(skbs, &chan->tx_q); - l2cap_ertm_send(chan); - break; - case L2CAP_EV_LOCAL_BUSY_DETECTED: - BT_DBG("Enter LOCAL_BUSY"); - set_bit(CONN_LOCAL_BUSY, &chan->conn_state); - - if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { - /* The SREJ_SENT state must be aborted if we are to - * enter the LOCAL_BUSY state. 
- */ - l2cap_abort_rx_srej_sent(chan); - } - - l2cap_send_ack(chan); - - break; - case L2CAP_EV_LOCAL_BUSY_CLEAR: - BT_DBG("Exit LOCAL_BUSY"); - clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); - - if (test_bit(CONN_RNR_SENT, &chan->conn_state)) { - struct l2cap_ctrl local_control; - - memset(&local_control, 0, sizeof(local_control)); - local_control.sframe = 1; - local_control.super = L2CAP_SUPER_RR; - local_control.poll = 1; - local_control.reqseq = chan->buffer_seq; - l2cap_send_sframe(chan, &local_control); - - chan->retry_count = 1; - __set_monitor_timer(chan); - chan->tx_state = L2CAP_TX_STATE_WAIT_F; - } - break; - case L2CAP_EV_RECV_REQSEQ_AND_FBIT: - l2cap_process_reqseq(chan, control->reqseq); - break; - case L2CAP_EV_EXPLICIT_POLL: - l2cap_send_rr_or_rnr(chan, 1); - chan->retry_count = 1; - __set_monitor_timer(chan); - __clear_ack_timer(chan); - chan->tx_state = L2CAP_TX_STATE_WAIT_F; - break; - case L2CAP_EV_RETRANS_TO: - l2cap_send_rr_or_rnr(chan, 1); - chan->retry_count = 1; - __set_monitor_timer(chan); - chan->tx_state = L2CAP_TX_STATE_WAIT_F; - break; - case L2CAP_EV_RECV_FBIT: - /* Nothing to process */ - break; - default: - break; - } -} - -static void l2cap_tx_state_wait_f(struct l2cap_chan *chan, - struct l2cap_ctrl *control, - struct sk_buff_head *skbs, u8 event) -{ - BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, - event); - - switch (event) { - case L2CAP_EV_DATA_REQUEST: - if (chan->tx_send_head == NULL) - chan->tx_send_head = skb_peek(skbs); - /* Queue data, but don't send. */ - skb_queue_splice_tail_init(skbs, &chan->tx_q); - break; - case L2CAP_EV_LOCAL_BUSY_DETECTED: - BT_DBG("Enter LOCAL_BUSY"); - set_bit(CONN_LOCAL_BUSY, &chan->conn_state); - - if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { - /* The SREJ_SENT state must be aborted if we are to - * enter the LOCAL_BUSY state. 
- */ - l2cap_abort_rx_srej_sent(chan); - } - - l2cap_send_ack(chan); - - break; - case L2CAP_EV_LOCAL_BUSY_CLEAR: - BT_DBG("Exit LOCAL_BUSY"); - clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); - - if (test_bit(CONN_RNR_SENT, &chan->conn_state)) { - struct l2cap_ctrl local_control; - memset(&local_control, 0, sizeof(local_control)); - local_control.sframe = 1; - local_control.super = L2CAP_SUPER_RR; - local_control.poll = 1; - local_control.reqseq = chan->buffer_seq; - l2cap_send_sframe(chan, &local_control); - - chan->retry_count = 1; - __set_monitor_timer(chan); - chan->tx_state = L2CAP_TX_STATE_WAIT_F; - } - break; - case L2CAP_EV_RECV_REQSEQ_AND_FBIT: - l2cap_process_reqseq(chan, control->reqseq); - - /* Fall through */ - - case L2CAP_EV_RECV_FBIT: - if (control && control->final) { - __clear_monitor_timer(chan); - if (chan->unacked_frames > 0) - __set_retrans_timer(chan); - chan->retry_count = 0; - chan->tx_state = L2CAP_TX_STATE_XMIT; - BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state); - } - break; - case L2CAP_EV_EXPLICIT_POLL: - /* Ignore */ - break; - case L2CAP_EV_MONITOR_TO: - if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) { - l2cap_send_rr_or_rnr(chan, 1); - __set_monitor_timer(chan); - chan->retry_count++; - } else { - l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); - } - break; - default: - break; - } -} - -static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, - struct sk_buff_head *skbs, u8 event) -{ - BT_DBG("chan %p, control %p, skbs %p, event %d, state %d", - chan, control, skbs, event, chan->tx_state); - - switch (chan->tx_state) { - case L2CAP_TX_STATE_XMIT: - l2cap_tx_state_xmit(chan, control, skbs, event); - break; - case L2CAP_TX_STATE_WAIT_F: - l2cap_tx_state_wait_f(chan, control, skbs, event); - break; - default: - /* Ignore event */ - break; - } -} - -static void l2cap_pass_to_tx(struct l2cap_chan *chan, - struct l2cap_ctrl *control) -{ - BT_DBG("chan %p, control %p", chan, control); - l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT); -} - -static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan, - struct l2cap_ctrl *control) -{ - BT_DBG("chan %p, control %p", chan, control); - l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT); -} - /* Copy frame to all raw sockets on that connection */ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) { @@ -2526,7 +2165,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) if (!nskb) continue; - if (chan->ops->recv(chan, nskb)) + if (chan->ops->recv(chan->data, nskb)) kfree_skb(nskb); } @@ -2556,9 +2195,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen); if (conn->hcon->type == LE_LINK) - lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING); + lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING); else - lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING); + lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING); cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE); cmd->code = code; @@ -2670,8 +2309,8 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan) efs.stype = chan->local_stype; efs.msdu = cpu_to_le16(chan->local_msdu); efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); - efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); - efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO); + efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); + efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO); break; case L2CAP_MODE_STREAMING: @@ 
-2694,24 +2333,20 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan) static void l2cap_ack_timeout(struct work_struct *work) { struct l2cap_chan *chan = container_of(work, struct l2cap_chan, - ack_timer.work); - u16 frames_to_ack; + ack_timer.work); BT_DBG("chan %p", chan); l2cap_chan_lock(chan); - frames_to_ack = __seq_offset(chan, chan->buffer_seq, - chan->last_acked_seq); - - if (frames_to_ack) - l2cap_send_rr_or_rnr(chan, 0); + __l2cap_send_ack(chan); l2cap_chan_unlock(chan); + l2cap_chan_put(chan); } -int l2cap_ertm_init(struct l2cap_chan *chan) +static inline int l2cap_ertm_init(struct l2cap_chan *chan) { int err; @@ -2720,6 +2355,7 @@ int l2cap_ertm_init(struct l2cap_chan *chan) chan->expected_ack_seq = 0; chan->unacked_frames = 0; chan->buffer_seq = 0; + chan->num_acked = 0; chan->frames_sent = 0; chan->last_acked_seq = 0; chan->sdu = NULL; @@ -2740,15 +2376,12 @@ int l2cap_ertm_init(struct l2cap_chan *chan) skb_queue_head_init(&chan->srej_q); + INIT_LIST_HEAD(&chan->srej_l); err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win); if (err < 0) return err; - err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win); - if (err < 0) - l2cap_seq_list_free(&chan->srej_list); - - return err; + return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win); } static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) @@ -2874,7 +2507,6 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) break; case L2CAP_MODE_STREAMING: - l2cap_txwin_setup(chan); rfc.mode = L2CAP_MODE_STREAMING; rfc.txwin_size = 0; rfc.max_transmit = 0; @@ -2905,7 +2537,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) } req->dcid = cpu_to_le16(chan->dcid); - req->flags = __constant_cpu_to_le16(0); + req->flags = cpu_to_le16(0); return ptr - data; } @@ -3125,7 +2757,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) } rsp->scid = cpu_to_le16(chan->dcid); rsp->result = cpu_to_le16(result); - rsp->flags = __constant_cpu_to_le16(0); + rsp->flags = cpu_to_le16(0x0000); return ptr - data; } @@ -3224,7 +2856,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi } req->dcid = cpu_to_le16(chan->dcid); - req->flags = __constant_cpu_to_le16(0); + req->flags = cpu_to_le16(0x0000); return ptr - data; } @@ -3251,8 +2883,8 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan) rsp.scid = cpu_to_le16(chan->dcid); rsp.dcid = cpu_to_le16(chan->scid); - rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS); - rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); + rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); + rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); @@ -3290,8 +2922,8 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len) * did not send an RFC option. 
*/ rfc.mode = chan->mode; - rfc.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); - rfc.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); + rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); + rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); rfc.max_pdu_size = cpu_to_le16(chan->imtu); BT_ERR("Expected RFC option was not found, using defaults"); @@ -3354,7 +2986,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd lock_sock(parent); /* Check if the ACL is secure enough (if not SDP) */ - if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) && + if (psm != cpu_to_le16(0x0001) && !hci_conn_check_link_mode(conn->hcon)) { conn->disc_reason = HCI_ERROR_AUTH_FAILURE; result = L2CAP_CR_SEC_BLOCK; @@ -3363,16 +2995,25 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd result = L2CAP_CR_NO_MEM; - /* Check if we already have channel with that dcid */ - if (__l2cap_get_chan_by_dcid(conn, scid)) + /* Check for backlog size */ + if (sk_acceptq_is_full(parent)) { + BT_DBG("backlog full %d", parent->sk_ack_backlog); goto response; + } - chan = pchan->ops->new_connection(pchan); + chan = pchan->ops->new_connection(pchan->data); if (!chan) goto response; sk = chan->sk; + /* Check if we already have channel with that dcid */ + if (__l2cap_get_chan_by_dcid(conn, scid)) { + sock_set_flag(sk, SOCK_ZAPPED); + chan->ops->close(chan->data); + goto response; + } + hci_conn_hold(conn->hcon); bacpy(&bt_sk(sk)->src, conn->src); @@ -3426,7 +3067,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { struct l2cap_info_req info; - info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK); + info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; conn->info_ident = l2cap_get_ident(conn); @@ -3548,7 +3189,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { struct l2cap_cmd_rej_cid rej; - rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID); + rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID); rej.scid = cpu_to_le16(chan->scid); rej.dcid = cpu_to_le16(chan->dcid); @@ -3570,11 +3211,11 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr memcpy(chan->conf_req + chan->conf_len, req->data, len); chan->conf_len += len; - if (flags & L2CAP_CONF_FLAG_CONTINUATION) { + if (flags & 0x0001) { /* Incomplete config. Send empty response. 
*/ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(chan, rsp, - L2CAP_CONF_SUCCESS, flags), rsp); + L2CAP_CONF_SUCCESS, 0x0001), rsp); goto unlock; } @@ -3597,6 +3238,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) { set_default_fcs(chan); + l2cap_state_change(chan, BT_CONNECTED); + if (chan->mode == L2CAP_MODE_ERTM || chan->mode == L2CAP_MODE_STREAMING) err = l2cap_ertm_init(chan); @@ -3628,7 +3271,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(chan, rsp, - L2CAP_CONF_SUCCESS, flags), rsp); + L2CAP_CONF_SUCCESS, 0x0000), rsp); } unlock: @@ -3719,7 +3362,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr goto done; } - if (flags & L2CAP_CONF_FLAG_CONTINUATION) + if (flags & 0x01) goto done; set_bit(CONF_INPUT_DONE, &chan->conf_state); @@ -3727,6 +3370,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) { set_default_fcs(chan); + l2cap_state_change(chan, BT_CONNECTED); if (chan->mode == L2CAP_MODE_ERTM || chan->mode == L2CAP_MODE_STREAMING) err = l2cap_ertm_init(chan); @@ -3780,7 +3424,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd l2cap_chan_unlock(chan); - chan->ops->close(chan); + chan->ops->close(chan->data); l2cap_chan_put(chan); mutex_unlock(&conn->chan_lock); @@ -3814,7 +3458,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd l2cap_chan_unlock(chan); - chan->ops->close(chan); + chan->ops->close(chan->data); l2cap_chan_put(chan); mutex_unlock(&conn->chan_lock); @@ -3835,8 +3479,8 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm u8 buf[8]; u32 feat_mask = l2cap_feat_mask; struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; - rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK); - rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS); + rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); + rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); if (!disable_ertm) feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING | L2CAP_FEAT_FCS; @@ -3856,15 +3500,15 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm else l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP; - rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN); - rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS); + rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); + rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan)); l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf), buf); } else { struct l2cap_info_rsp rsp; rsp.type = cpu_to_le16(type); - rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP); + rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP); l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp); } @@ -3904,7 +3548,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) { struct l2cap_info_req req; - req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN); + req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); conn->info_ident = l2cap_get_ident(conn); @@ -4139,9 +3783,9 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, err = l2cap_check_conn_param(min, max, latency, to_multiplier); if (err) - rsp.result = 
__constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); + rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); else - rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED); + rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED); l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP, sizeof(rsp), &rsp); @@ -4289,7 +3933,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, BT_ERR("Wrong link type (%d)", err); /* FIXME: Map err to a valid reason */ - rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); + rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); } @@ -4321,38 +3965,65 @@ static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb) return 0; } -static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) +static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) { - struct l2cap_ctrl control; + u32 control = 0; - BT_DBG("chan %p", chan); + chan->frames_sent = 0; - memset(&control, 0, sizeof(control)); - control.sframe = 1; - control.final = 1; - control.reqseq = chan->buffer_seq; - set_bit(CONN_SEND_FBIT, &chan->conn_state); + control |= __set_reqseq(chan, chan->buffer_seq); if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { - control.super = L2CAP_SUPER_RNR; - l2cap_send_sframe(chan, &control); + control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); + l2cap_send_sframe(chan, control); + set_bit(CONN_RNR_SENT, &chan->conn_state); } - if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) && - chan->unacked_frames > 0) - __set_retrans_timer(chan); + if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) + l2cap_retransmit_frames(chan); - /* Send pending iframes */ l2cap_ertm_send(chan); if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && - test_bit(CONN_SEND_FBIT, &chan->conn_state)) { - /* F-bit wasn't sent in an s-frame or i-frame yet, so - * send it now. 
- */ - control.super = L2CAP_SUPER_RR; - l2cap_send_sframe(chan, &control); + chan->frames_sent == 0) { + control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); + l2cap_send_sframe(chan, control); + } +} + +static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar) +{ + struct sk_buff *next_skb; + int tx_seq_offset, next_tx_seq_offset; + + bt_cb(skb)->control.txseq = tx_seq; + bt_cb(skb)->control.sar = sar; + + next_skb = skb_peek(&chan->srej_q); + + tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); + + while (next_skb) { + if (bt_cb(next_skb)->control.txseq == tx_seq) + return -EINVAL; + + next_tx_seq_offset = __seq_offset(chan, + bt_cb(next_skb)->control.txseq, chan->buffer_seq); + + if (next_tx_seq_offset > tx_seq_offset) { + __skb_queue_before(&chan->srej_q, next_skb, skb); + return 0; + } + + if (skb_queue_is_last(&chan->srej_q, next_skb)) + next_skb = NULL; + else + next_skb = skb_queue_next(&chan->srej_q, next_skb); } + + __skb_queue_tail(&chan->srej_q, skb); + + return 0; } static void append_skb_frag(struct sk_buff *skb, @@ -4374,17 +4045,16 @@ static void append_skb_frag(struct sk_buff *skb, skb->truesize += new_frag->truesize; } -static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, - struct l2cap_ctrl *control) +static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control) { int err = -EINVAL; - switch (control->sar) { + switch (__get_ctrl_sar(chan, control)) { case L2CAP_SAR_UNSEGMENTED: if (chan->sdu) break; - err = chan->ops->recv(chan, skb); + err = chan->ops->recv(chan->data, skb); break; case L2CAP_SAR_START: @@ -4434,7 +4104,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, if (chan->sdu->len != chan->sdu_len) break; - err = chan->ops->recv(chan, chan->sdu); + err = chan->ops->recv(chan->data, chan->sdu); if (!err) { /* Reassembly complete */ @@ -4456,609 +4126,448 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, return err; } -void l2cap_chan_busy(struct l2cap_chan *chan, int busy) +static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) { - u8 event; + BT_DBG("chan %p, Enter local busy", chan); - if (chan->mode != L2CAP_MODE_ERTM) - return; + set_bit(CONN_LOCAL_BUSY, &chan->conn_state); + l2cap_seq_list_clear(&chan->srej_list); - event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR; - l2cap_tx(chan, NULL, NULL, event); + __set_ack_timer(chan); } -static int l2cap_rx_queued_iframes(struct l2cap_chan *chan) +static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan) { - int err = 0; - /* Pass sequential frames to l2cap_reassemble_sdu() - * until a gap is encountered. 
- */ + u32 control; - BT_DBG("chan %p", chan); + if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) + goto done; - while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { - struct sk_buff *skb; - BT_DBG("Searching for skb with txseq %d (queue len %d)", - chan->buffer_seq, skb_queue_len(&chan->srej_q)); + control = __set_reqseq(chan, chan->buffer_seq); + control |= __set_ctrl_poll(chan); + control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); + l2cap_send_sframe(chan, control); + chan->retry_count = 1; - skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq); + __clear_retrans_timer(chan); + __set_monitor_timer(chan); - if (!skb) - break; + set_bit(CONN_WAIT_F, &chan->conn_state); - skb_unlink(skb, &chan->srej_q); - chan->buffer_seq = __next_seq(chan, chan->buffer_seq); - err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control); - if (err) - break; - } +done: + clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); + clear_bit(CONN_RNR_SENT, &chan->conn_state); - if (skb_queue_empty(&chan->srej_q)) { - chan->rx_state = L2CAP_RX_STATE_RECV; - l2cap_send_ack(chan); - } + BT_DBG("chan %p, Exit local busy", chan); +} - return err; +void l2cap_chan_busy(struct l2cap_chan *chan, int busy) +{ + if (chan->mode == L2CAP_MODE_ERTM) { + if (busy) + l2cap_ertm_enter_local_busy(chan); + else + l2cap_ertm_exit_local_busy(chan); + } } -static void l2cap_handle_srej(struct l2cap_chan *chan, - struct l2cap_ctrl *control) +static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq) { struct sk_buff *skb; + u32 control; - BT_DBG("chan %p, control %p", chan, control); + while ((skb = skb_peek(&chan->srej_q)) && + !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { + int err; - if (control->reqseq == chan->next_tx_seq) { - BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq); - l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); - return; - } + if (bt_cb(skb)->control.txseq != tx_seq) + break; - skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq); + skb = skb_dequeue(&chan->srej_q); + control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar); + err = l2cap_reassemble_sdu(chan, skb, control); - if (skb == NULL) { - BT_DBG("Seq %d not available for retransmission", - control->reqseq); - return; - } + if (err < 0) { + l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); + break; + } - if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) { - BT_DBG("Retry limit exceeded (%d)", chan->max_tx); - l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); - return; + chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej); + tx_seq = __next_seq(chan, tx_seq); } +} - clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); - - if (control->poll) { - l2cap_pass_to_tx(chan, control); - - set_bit(CONN_SEND_FBIT, &chan->conn_state); - l2cap_retransmit(chan, control); - l2cap_ertm_send(chan); - - if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) { - set_bit(CONN_SREJ_ACT, &chan->conn_state); - chan->srej_save_reqseq = control->reqseq; - } - } else { - l2cap_pass_to_tx_fbit(chan, control); +static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq) +{ + struct srej_list *l, *tmp; + u32 control; - if (control->final) { - if (chan->srej_save_reqseq != control->reqseq || - !test_and_clear_bit(CONN_SREJ_ACT, - &chan->conn_state)) - l2cap_retransmit(chan, control); - } else { - l2cap_retransmit(chan, control); - if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) { - set_bit(CONN_SREJ_ACT, &chan->conn_state); - chan->srej_save_reqseq = control->reqseq; - } + list_for_each_entry_safe(l, tmp, 
&chan->srej_l, list) { + if (l->tx_seq == tx_seq) { + list_del(&l->list); + kfree(l); + return; } + control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); + control |= __set_reqseq(chan, l->tx_seq); + l2cap_send_sframe(chan, control); + list_del(&l->list); + list_add_tail(&l->list, &chan->srej_l); } } -static void l2cap_handle_rej(struct l2cap_chan *chan, - struct l2cap_ctrl *control) +static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq) { - struct sk_buff *skb; + struct srej_list *new; + u32 control; - BT_DBG("chan %p, control %p", chan, control); + while (tx_seq != chan->expected_tx_seq) { + control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); + control |= __set_reqseq(chan, chan->expected_tx_seq); + l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq); + l2cap_send_sframe(chan, control); - if (control->reqseq == chan->next_tx_seq) { - BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq); - l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); - return; - } + new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); + if (!new) + return -ENOMEM; - skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq); + new->tx_seq = chan->expected_tx_seq; - if (chan->max_tx && skb && - bt_cb(skb)->control.retries >= chan->max_tx) { - BT_DBG("Retry limit exceeded (%d)", chan->max_tx); - l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); - return; - } + chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); - clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); + list_add_tail(&new->list, &chan->srej_l); + } - l2cap_pass_to_tx(chan, control); + chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); - if (control->final) { - if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) - l2cap_retransmit_all(chan, control); - } else { - l2cap_retransmit_all(chan, control); - l2cap_ertm_send(chan); - if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) - set_bit(CONN_REJ_ACT, &chan->conn_state); - } + return 0; } -static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq) +static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) { - BT_DBG("chan %p, txseq %d", chan, txseq); + u16 tx_seq = __get_txseq(chan, rx_control); + u16 req_seq = __get_reqseq(chan, rx_control); + u8 sar = __get_ctrl_sar(chan, rx_control); + int tx_seq_offset, expected_tx_seq_offset; + int num_to_ack = (chan->tx_win/6) + 1; + int err = 0; - BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq, - chan->expected_tx_seq); + BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len, + tx_seq, rx_control); - if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { - if (__seq_offset(chan, txseq, chan->last_acked_seq) >= - chan->tx_win) { - /* See notes below regarding "double poll" and - * invalid packets. 
- */ - if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) { - BT_DBG("Invalid/Ignore - after SREJ"); - return L2CAP_TXSEQ_INVALID_IGNORE; - } else { - BT_DBG("Invalid - in window after SREJ sent"); - return L2CAP_TXSEQ_INVALID; - } - } + if (__is_ctrl_final(chan, rx_control) && + test_bit(CONN_WAIT_F, &chan->conn_state)) { + __clear_monitor_timer(chan); + if (chan->unacked_frames > 0) + __set_retrans_timer(chan); + clear_bit(CONN_WAIT_F, &chan->conn_state); + } - if (chan->srej_list.head == txseq) { - BT_DBG("Expected SREJ"); - return L2CAP_TXSEQ_EXPECTED_SREJ; - } + chan->expected_ack_seq = req_seq; + l2cap_drop_acked_frames(chan); - if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) { - BT_DBG("Duplicate SREJ - txseq already stored"); - return L2CAP_TXSEQ_DUPLICATE_SREJ; - } + tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); - if (l2cap_seq_list_contains(&chan->srej_list, txseq)) { - BT_DBG("Unexpected SREJ - not requested"); - return L2CAP_TXSEQ_UNEXPECTED_SREJ; - } + /* invalid tx_seq */ + if (tx_seq_offset >= chan->tx_win) { + l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); + goto drop; } - if (chan->expected_tx_seq == txseq) { - if (__seq_offset(chan, txseq, chan->last_acked_seq) >= - chan->tx_win) { - BT_DBG("Invalid - txseq outside tx window"); - return L2CAP_TXSEQ_INVALID; - } else { - BT_DBG("Expected"); - return L2CAP_TXSEQ_EXPECTED; - } + if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { + if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) + l2cap_send_ack(chan); + goto drop; } - if (__seq_offset(chan, txseq, chan->last_acked_seq) < - __seq_offset(chan, chan->expected_tx_seq, - chan->last_acked_seq)){ - BT_DBG("Duplicate - expected_tx_seq later than txseq"); - return L2CAP_TXSEQ_DUPLICATE; - } - - if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) { - /* A source of invalid packets is a "double poll" condition, - * where delays cause us to send multiple poll packets. If - * the remote stack receives and processes both polls, - * sequence numbers can wrap around in such a way that a - * resent frame has a sequence number that looks like new data - * with a sequence gap. This would trigger an erroneous SREJ - * request. - * - * Fortunately, this is impossible with a tx window that's - * less than half of the maximum sequence number, which allows - * invalid frames to be safely ignored. - * - * With tx window sizes greater than half of the tx window - * maximum, the frame is invalid and cannot be ignored. This - * causes a disconnect. 
- */ + if (tx_seq == chan->expected_tx_seq) + goto expected; - if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) { - BT_DBG("Invalid/Ignore - txseq outside tx window"); - return L2CAP_TXSEQ_INVALID_IGNORE; - } else { - BT_DBG("Invalid - txseq outside tx window"); - return L2CAP_TXSEQ_INVALID; - } - } else { - BT_DBG("Unexpected - txseq indicates missing frames"); - return L2CAP_TXSEQ_UNEXPECTED; - } -} - -static int l2cap_rx_state_recv(struct l2cap_chan *chan, - struct l2cap_ctrl *control, - struct sk_buff *skb, u8 event) -{ - int err = 0; - bool skb_in_use = 0; + if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { + struct srej_list *first; - BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, - event); + first = list_first_entry(&chan->srej_l, + struct srej_list, list); + if (tx_seq == first->tx_seq) { + l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); + l2cap_check_srej_gap(chan, tx_seq); - switch (event) { - case L2CAP_EV_RECV_IFRAME: - switch (l2cap_classify_txseq(chan, control->txseq)) { - case L2CAP_TXSEQ_EXPECTED: - l2cap_pass_to_tx(chan, control); + list_del(&first->list); + kfree(first); - if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { - BT_DBG("Busy, discarding expected seq %d", - control->txseq); - break; + if (list_empty(&chan->srej_l)) { + chan->buffer_seq = chan->buffer_seq_srej; + clear_bit(CONN_SREJ_SENT, &chan->conn_state); + l2cap_send_ack(chan); + BT_DBG("chan %p, Exit SREJ_SENT", chan); } + } else { + struct srej_list *l; - chan->expected_tx_seq = __next_seq(chan, - control->txseq); - - chan->buffer_seq = chan->expected_tx_seq; - skb_in_use = 1; - - err = l2cap_reassemble_sdu(chan, skb, control); - if (err) - break; + /* duplicated tx_seq */ + if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0) + goto drop; - if (control->final) { - if (!test_and_clear_bit(CONN_REJ_ACT, - &chan->conn_state)) { - control->final = 0; - l2cap_retransmit_all(chan, control); - l2cap_ertm_send(chan); + list_for_each_entry(l, &chan->srej_l, list) { + if (l->tx_seq == tx_seq) { + l2cap_resend_srejframe(chan, tx_seq); + return 0; } } - if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) - l2cap_send_ack(chan); - break; - case L2CAP_TXSEQ_UNEXPECTED: - l2cap_pass_to_tx(chan, control); - - /* Can't issue SREJ frames in the local busy state. - * Drop this frame, it will be seen as missing - * when local busy is exited. - */ - if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { - BT_DBG("Busy, discarding unexpected seq %d", - control->txseq); - break; + err = l2cap_send_srejframe(chan, tx_seq); + if (err < 0) { + l2cap_send_disconn_req(chan->conn, chan, -err); + return err; } + } + } else { + expected_tx_seq_offset = __seq_offset(chan, + chan->expected_tx_seq, chan->buffer_seq); - /* There was a gap in the sequence, so an SREJ - * must be sent for each missing frame. The - * current frame is stored for later use. 
- */ - skb_queue_tail(&chan->srej_q, skb); - skb_in_use = 1; - BT_DBG("Queued %p (queue len %d)", skb, - skb_queue_len(&chan->srej_q)); + /* duplicated tx_seq */ + if (tx_seq_offset < expected_tx_seq_offset) + goto drop; - clear_bit(CONN_SREJ_ACT, &chan->conn_state); - l2cap_seq_list_clear(&chan->srej_list); - l2cap_send_srej(chan, control->txseq); + set_bit(CONN_SREJ_SENT, &chan->conn_state); - chan->rx_state = L2CAP_RX_STATE_SREJ_SENT; - break; - case L2CAP_TXSEQ_DUPLICATE: - l2cap_pass_to_tx(chan, control); - break; - case L2CAP_TXSEQ_INVALID_IGNORE: - break; - case L2CAP_TXSEQ_INVALID: - default: - l2cap_send_disconn_req(chan->conn, chan, - ECONNRESET); - break; - } - break; - case L2CAP_EV_RECV_RR: - l2cap_pass_to_tx(chan, control); - if (control->final) { - clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); + BT_DBG("chan %p, Enter SREJ", chan); - if (!test_and_clear_bit(CONN_REJ_ACT, - &chan->conn_state)) { - control->final = 0; - l2cap_retransmit_all(chan, control); - } + INIT_LIST_HEAD(&chan->srej_l); + chan->buffer_seq_srej = chan->buffer_seq; - l2cap_ertm_send(chan); - } else if (control->poll) { - l2cap_send_i_or_rr_or_rnr(chan); - } else { - if (test_and_clear_bit(CONN_REMOTE_BUSY, - &chan->conn_state) && - chan->unacked_frames) - __set_retrans_timer(chan); + __skb_queue_head_init(&chan->srej_q); + l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); - l2cap_ertm_send(chan); - } - break; - case L2CAP_EV_RECV_RNR: - set_bit(CONN_REMOTE_BUSY, &chan->conn_state); - l2cap_pass_to_tx(chan, control); - if (control && control->poll) { - set_bit(CONN_SEND_FBIT, &chan->conn_state); - l2cap_send_rr_or_rnr(chan, 0); + /* Set P-bit only if there are some I-frames to ack. */ + if (__clear_ack_timer(chan)) + set_bit(CONN_SEND_PBIT, &chan->conn_state); + + err = l2cap_send_srejframe(chan, tx_seq); + if (err < 0) { + l2cap_send_disconn_req(chan->conn, chan, -err); + return err; } - __clear_retrans_timer(chan); - l2cap_seq_list_clear(&chan->retrans_list); - break; - case L2CAP_EV_RECV_REJ: - l2cap_handle_rej(chan, control); - break; - case L2CAP_EV_RECV_SREJ: - l2cap_handle_srej(chan, control); - break; - default: - break; } + return 0; - if (skb && !skb_in_use) { - BT_DBG("Freeing %p", skb); - kfree_skb(skb); +expected: + chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); + + if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { + bt_cb(skb)->control.txseq = tx_seq; + bt_cb(skb)->control.sar = sar; + __skb_queue_tail(&chan->srej_q, skb); + return 0; } - return err; -} + err = l2cap_reassemble_sdu(chan, skb, rx_control); + chan->buffer_seq = __next_seq(chan, chan->buffer_seq); -static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan, - struct l2cap_ctrl *control, - struct sk_buff *skb, u8 event) -{ - int err = 0; - u16 txseq = control->txseq; - bool skb_in_use = 0; - - BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, - event); - - switch (event) { - case L2CAP_EV_RECV_IFRAME: - switch (l2cap_classify_txseq(chan, txseq)) { - case L2CAP_TXSEQ_EXPECTED: - /* Keep frame for reassembly later */ - l2cap_pass_to_tx(chan, control); - skb_queue_tail(&chan->srej_q, skb); - skb_in_use = 1; - BT_DBG("Queued %p (queue len %d)", skb, - skb_queue_len(&chan->srej_q)); - - chan->expected_tx_seq = __next_seq(chan, txseq); - break; - case L2CAP_TXSEQ_EXPECTED_SREJ: - l2cap_seq_list_pop(&chan->srej_list); + if (err < 0) { + l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); + return err; + } - l2cap_pass_to_tx(chan, control); - skb_queue_tail(&chan->srej_q, skb); - skb_in_use = 1; - 
BT_DBG("Queued %p (queue len %d)", skb, - skb_queue_len(&chan->srej_q)); + if (__is_ctrl_final(chan, rx_control)) { + if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) + l2cap_retransmit_frames(chan); + } - err = l2cap_rx_queued_iframes(chan); - if (err) - break; - break; - case L2CAP_TXSEQ_UNEXPECTED: - /* Got a frame that can't be reassembled yet. - * Save it for later, and send SREJs to cover - * the missing frames. - */ - skb_queue_tail(&chan->srej_q, skb); - skb_in_use = 1; - BT_DBG("Queued %p (queue len %d)", skb, - skb_queue_len(&chan->srej_q)); - - l2cap_pass_to_tx(chan, control); - l2cap_send_srej(chan, control->txseq); - break; - case L2CAP_TXSEQ_UNEXPECTED_SREJ: - /* This frame was requested with an SREJ, but - * some expected retransmitted frames are - * missing. Request retransmission of missing - * SREJ'd frames. - */ - skb_queue_tail(&chan->srej_q, skb); - skb_in_use = 1; - BT_DBG("Queued %p (queue len %d)", skb, - skb_queue_len(&chan->srej_q)); - - l2cap_pass_to_tx(chan, control); - l2cap_send_srej_list(chan, control->txseq); - break; - case L2CAP_TXSEQ_DUPLICATE_SREJ: - /* We've already queued this frame. Drop this copy. */ - l2cap_pass_to_tx(chan, control); - break; - case L2CAP_TXSEQ_DUPLICATE: - /* Expecting a later sequence number, so this frame - * was already received. Ignore it completely. - */ - break; - case L2CAP_TXSEQ_INVALID_IGNORE: - break; - case L2CAP_TXSEQ_INVALID: - default: - l2cap_send_disconn_req(chan->conn, chan, - ECONNRESET); - break; - } - break; - case L2CAP_EV_RECV_RR: - l2cap_pass_to_tx(chan, control); - if (control->final) { - clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); + chan->num_acked = (chan->num_acked + 1) % num_to_ack; + if (chan->num_acked == num_to_ack - 1) + l2cap_send_ack(chan); + else + __set_ack_timer(chan); - if (!test_and_clear_bit(CONN_REJ_ACT, - &chan->conn_state)) { - control->final = 0; - l2cap_retransmit_all(chan, control); - } + return 0; - l2cap_ertm_send(chan); - } else if (control->poll) { - if (test_and_clear_bit(CONN_REMOTE_BUSY, - &chan->conn_state) && - chan->unacked_frames) { - __set_retrans_timer(chan); - } +drop: + kfree_skb(skb); + return 0; +} - set_bit(CONN_SEND_FBIT, &chan->conn_state); - l2cap_send_srej_tail(chan); - } else { - if (test_and_clear_bit(CONN_REMOTE_BUSY, - &chan->conn_state) && - chan->unacked_frames) +static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control) +{ + BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, + __get_reqseq(chan, rx_control), rx_control); + + chan->expected_ack_seq = __get_reqseq(chan, rx_control); + l2cap_drop_acked_frames(chan); + + if (__is_ctrl_poll(chan, rx_control)) { + set_bit(CONN_SEND_FBIT, &chan->conn_state); + if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { + if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && + (chan->unacked_frames > 0)) __set_retrans_timer(chan); - l2cap_send_ack(chan); - } - break; - case L2CAP_EV_RECV_RNR: - set_bit(CONN_REMOTE_BUSY, &chan->conn_state); - l2cap_pass_to_tx(chan, control); - if (control->poll) { - l2cap_send_srej_tail(chan); + clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); + l2cap_send_srejtail(chan); } else { - struct l2cap_ctrl rr_control; - memset(&rr_control, 0, sizeof(rr_control)); - rr_control.sframe = 1; - rr_control.super = L2CAP_SUPER_RR; - rr_control.reqseq = chan->buffer_seq; - l2cap_send_sframe(chan, &rr_control); + l2cap_send_i_or_rr_or_rnr(chan); } - break; - case L2CAP_EV_RECV_REJ: - l2cap_handle_rej(chan, control); - break; - case L2CAP_EV_RECV_SREJ: - 
l2cap_handle_srej(chan, control); - break; - } + } else if (__is_ctrl_final(chan, rx_control)) { + clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); - if (skb && !skb_in_use) { - BT_DBG("Freeing %p", skb); - kfree_skb(skb); - } + if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) + l2cap_retransmit_frames(chan); - return err; + } else { + if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && + (chan->unacked_frames > 0)) + __set_retrans_timer(chan); + + clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); + if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) + l2cap_send_ack(chan); + else + l2cap_ertm_send(chan); + } } -static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq) +static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control) { - /* Make sure reqseq is for a packet that has been sent but not acked */ - u16 unacked; + u16 tx_seq = __get_reqseq(chan, rx_control); - unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq); - return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked; -} + BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); + + clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); + + chan->expected_ack_seq = tx_seq; + l2cap_drop_acked_frames(chan); + + if (__is_ctrl_final(chan, rx_control)) { + if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) + l2cap_retransmit_frames(chan); + } else { + l2cap_retransmit_frames(chan); -static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, - struct sk_buff *skb, u8 event) + if (test_bit(CONN_WAIT_F, &chan->conn_state)) + set_bit(CONN_REJ_ACT, &chan->conn_state); + } +} +static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control) { - int err = 0; + u16 tx_seq = __get_reqseq(chan, rx_control); - BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan, - control, skb, event, chan->rx_state); + BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); - if (__valid_reqseq(chan, control->reqseq)) { - switch (chan->rx_state) { - case L2CAP_RX_STATE_RECV: - err = l2cap_rx_state_recv(chan, control, skb, event); - break; - case L2CAP_RX_STATE_SREJ_SENT: - err = l2cap_rx_state_srej_sent(chan, control, skb, - event); - break; - default: - /* shut it down */ - break; + clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); + + if (__is_ctrl_poll(chan, rx_control)) { + chan->expected_ack_seq = tx_seq; + l2cap_drop_acked_frames(chan); + + set_bit(CONN_SEND_FBIT, &chan->conn_state); + l2cap_retransmit_one_frame(chan, tx_seq); + + l2cap_ertm_send(chan); + + if (test_bit(CONN_WAIT_F, &chan->conn_state)) { + chan->srej_save_reqseq = tx_seq; + set_bit(CONN_SREJ_ACT, &chan->conn_state); } + } else if (__is_ctrl_final(chan, rx_control)) { + if (test_bit(CONN_SREJ_ACT, &chan->conn_state) && + chan->srej_save_reqseq == tx_seq) + clear_bit(CONN_SREJ_ACT, &chan->conn_state); + else + l2cap_retransmit_one_frame(chan, tx_seq); } else { - BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d", - control->reqseq, chan->next_tx_seq, - chan->expected_ack_seq); - l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); + l2cap_retransmit_one_frame(chan, tx_seq); + if (test_bit(CONN_WAIT_F, &chan->conn_state)) { + chan->srej_save_reqseq = tx_seq; + set_bit(CONN_SREJ_ACT, &chan->conn_state); + } } - - return err; } -static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, - struct sk_buff *skb) +static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control) { - int err = 0; + u16 
tx_seq = __get_reqseq(chan, rx_control); - BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb, - chan->rx_state); + BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); - if (l2cap_classify_txseq(chan, control->txseq) == - L2CAP_TXSEQ_EXPECTED) { - l2cap_pass_to_tx(chan, control); + set_bit(CONN_REMOTE_BUSY, &chan->conn_state); + chan->expected_ack_seq = tx_seq; + l2cap_drop_acked_frames(chan); - BT_DBG("buffer_seq %d->%d", chan->buffer_seq, - __next_seq(chan, chan->buffer_seq)); + if (__is_ctrl_poll(chan, rx_control)) + set_bit(CONN_SEND_FBIT, &chan->conn_state); - chan->buffer_seq = __next_seq(chan, chan->buffer_seq); + if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) { + __clear_retrans_timer(chan); + if (__is_ctrl_poll(chan, rx_control)) + l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL); + return; + } - l2cap_reassemble_sdu(chan, skb, control); + if (__is_ctrl_poll(chan, rx_control)) { + l2cap_send_srejtail(chan); } else { - if (chan->sdu) { - kfree_skb(chan->sdu); - chan->sdu = NULL; - } - chan->sdu_last_frag = NULL; - chan->sdu_len = 0; + rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR); + l2cap_send_sframe(chan, rx_control); + } +} - if (skb) { - BT_DBG("Freeing %p", skb); - kfree_skb(skb); - } +static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) +{ + BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len); + + if (__is_ctrl_final(chan, rx_control) && + test_bit(CONN_WAIT_F, &chan->conn_state)) { + __clear_monitor_timer(chan); + if (chan->unacked_frames > 0) + __set_retrans_timer(chan); + clear_bit(CONN_WAIT_F, &chan->conn_state); } - chan->last_acked_seq = control->txseq; - chan->expected_tx_seq = __next_seq(chan, control->txseq); + switch (__get_ctrl_super(chan, rx_control)) { + case L2CAP_SUPER_RR: + l2cap_data_channel_rrframe(chan, rx_control); + break; - return err; + case L2CAP_SUPER_REJ: + l2cap_data_channel_rejframe(chan, rx_control); + break; + + case L2CAP_SUPER_SREJ: + l2cap_data_channel_srejframe(chan, rx_control); + break; + + case L2CAP_SUPER_RNR: + l2cap_data_channel_rnrframe(chan, rx_control); + break; + } + + kfree_skb(skb); + return 0; } -static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) +static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) { - struct l2cap_ctrl *control = &bt_cb(skb)->control; - u16 len; - u8 event; + u32 control; + u16 req_seq; + int len, next_tx_seq_offset, req_seq_offset; __unpack_control(chan, skb); + control = __get_control(chan, skb->data); + skb_pull(skb, __ctrl_size(chan)); len = skb->len; /* * We can just drop the corrupted I-frame here. * Receiver will miss it and start proper recovery - * procedures and ask for retransmission. + * procedures and ask retransmission. 
*/ if (l2cap_check_fcs(chan, skb)) goto drop; - if (!control->sframe && control->sar == L2CAP_SAR_START) + if (__is_sar_start(chan, control) && !__is_sframe(chan, control)) len -= L2CAP_SDULEN_SIZE; if (chan->fcs == L2CAP_FCS_CRC16) @@ -5069,57 +4578,34 @@ static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) goto drop; } - if (!control->sframe) { - int err; - - BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d", - control->sar, control->reqseq, control->final, - control->txseq); + req_seq = __get_reqseq(chan, control); - /* Validate F-bit - F=0 always valid, F=1 only - * valid in TX WAIT_F - */ - if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F) - goto drop; + req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq); - if (chan->mode != L2CAP_MODE_STREAMING) { - event = L2CAP_EV_RECV_IFRAME; - err = l2cap_rx(chan, control, skb, event); - } else { - err = l2cap_stream_rx(chan, control, skb); - } + next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq, + chan->expected_ack_seq); - if (err) - l2cap_send_disconn_req(chan->conn, chan, - ECONNRESET); - } else { - const u8 rx_func_to_event[4] = { - L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ, - L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ - }; + /* check for invalid req-seq */ + if (req_seq_offset > next_tx_seq_offset) { + l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); + goto drop; + } - /* Only I-frames are expected in streaming mode */ - if (chan->mode == L2CAP_MODE_STREAMING) + if (!__is_sframe(chan, control)) { + if (len < 0) { + l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); goto drop; + } - BT_DBG("sframe reqseq %d, final %d, poll %d, super %d", - control->reqseq, control->final, control->poll, - control->super); - + l2cap_data_channel_iframe(chan, control, skb); + } else { if (len != 0) { BT_ERR("%d", len); l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); goto drop; } - /* Validate F and P bits */ - if (control->final && (control->poll || - chan->tx_state != L2CAP_TX_STATE_WAIT_F)) - goto drop; - - event = rx_func_to_event[control->super]; - if (l2cap_rx(chan, control, skb, event)) - l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); + l2cap_data_channel_sframe(chan, control, skb); } return 0; @@ -5129,27 +4615,19 @@ static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) return 0; } -static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid, - struct sk_buff *skb) +static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) { struct l2cap_chan *chan; + u32 control; + u16 tx_seq; + int len; chan = l2cap_get_chan_by_scid(conn, cid); if (!chan) { - if (cid == L2CAP_CID_A2MP) { - chan = a2mp_channel_create(conn, skb); - if (!chan) { - kfree_skb(skb); - return; - } - - l2cap_chan_lock(chan); - } else { - BT_DBG("unknown cid 0x%4.4x", cid); - /* Drop packet and return */ - kfree_skb(skb); - return; - } + BT_DBG("unknown cid 0x%4.4x", cid); + /* Drop packet and return */ + kfree_skb(skb); + return 0; } BT_DBG("chan %p, len %d", chan, skb->len); @@ -5167,13 +4645,49 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid, if (chan->imtu < skb->len) goto drop; - if (!chan->ops->recv(chan, skb)) + if (!chan->ops->recv(chan->data, skb)) goto done; break; case L2CAP_MODE_ERTM: + l2cap_ertm_data_rcv(chan, skb); + + goto done; + case L2CAP_MODE_STREAMING: - l2cap_data_rcv(chan, skb); + control = __get_control(chan, skb->data); + skb_pull(skb, __ctrl_size(chan)); + len = skb->len; + + if (l2cap_check_fcs(chan, skb)) + goto drop; + + 
if (__is_sar_start(chan, control)) + len -= L2CAP_SDULEN_SIZE; + + if (chan->fcs == L2CAP_FCS_CRC16) + len -= L2CAP_FCS_SIZE; + + if (len > chan->mps || len < 0 || __is_sframe(chan, control)) + goto drop; + + tx_seq = __get_txseq(chan, control); + + if (chan->expected_tx_seq != tx_seq) { + /* Frame(s) missing - must discard partial SDU */ + kfree_skb(chan->sdu); + chan->sdu = NULL; + chan->sdu_last_frag = NULL; + chan->sdu_len = 0; + + /* TODO: Notify userland of missing data */ + } + + chan->expected_tx_seq = __next_seq(chan, tx_seq); + + if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE) + l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); + goto done; default: @@ -5186,10 +4700,11 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid, done: l2cap_chan_unlock(chan); + + return 0; } -static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, - struct sk_buff *skb) +static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb) { struct l2cap_chan *chan; @@ -5205,15 +4720,17 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, if (chan->imtu < skb->len) goto drop; - if (!chan->ops->recv(chan, skb)) - return; + if (!chan->ops->recv(chan->data, skb)) + return 0; drop: kfree_skb(skb); + + return 0; } -static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid, - struct sk_buff *skb) +static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid, + struct sk_buff *skb) { struct l2cap_chan *chan; @@ -5229,11 +4746,13 @@ static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid, if (chan->imtu < skb->len) goto drop; - if (!chan->ops->recv(chan, skb)) - return; + if (!chan->ops->recv(chan->data, skb)) + return 0; drop: kfree_skb(skb); + + return 0; } static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) @@ -5261,7 +4780,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) case L2CAP_CID_CONN_LESS: psm = get_unaligned((__le16 *) skb->data); - skb_pull(skb, L2CAP_PSMLEN_SIZE); + skb_pull(skb, 2); l2cap_conless_channel(conn, psm, skb); break; @@ -5455,17 +4974,6 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) rsp.status = cpu_to_le16(stat); l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); - - if (!test_bit(CONF_REQ_SENT, &chan->conf_state) && - res == L2CAP_CR_SUCCESS) { - char buf[128]; - set_bit(CONF_REQ_SENT, &chan->conf_state); - l2cap_send_cmd(conn, l2cap_get_ident(conn), - L2CAP_CONF_REQ, - l2cap_build_conf_req(chan, buf), - buf); - chan->num_conf_req++; - } } l2cap_chan_unlock(chan); diff --git a/trunk/net/bluetooth/l2cap_sock.c b/trunk/net/bluetooth/l2cap_sock.c index a4bb27e8427e..3bb1611b9d48 100644 --- a/trunk/net/bluetooth/l2cap_sock.c +++ b/trunk/net/bluetooth/l2cap_sock.c @@ -27,6 +27,7 @@ /* Bluetooth L2CAP sockets. 
*/ +#include #include #include @@ -88,8 +89,8 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) if (err < 0) goto done; - if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP || - __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM) + if (__le16_to_cpu(la.l2_psm) == 0x0001 || + __le16_to_cpu(la.l2_psm) == 0x0003) chan->sec_level = BT_SECURITY_SDP; bacpy(&bt_sk(sk)->src, &la.l2_bdaddr); @@ -445,22 +446,6 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch return err; } -static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu) -{ - switch (chan->scid) { - case L2CAP_CID_LE_DATA: - if (mtu < L2CAP_LE_MIN_MTU) - return false; - break; - - default: - if (mtu < L2CAP_DEFAULT_MIN_MTU) - return false; - } - - return true; -} - static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; @@ -499,11 +484,6 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us break; } - if (!l2cap_valid_mtu(chan, opts.imtu)) { - err = -EINVAL; - break; - } - chan->mode = opts.mode; switch (chan->mode) { case L2CAP_MODE_BASIC: @@ -893,34 +873,9 @@ static int l2cap_sock_release(struct socket *sock) return err; } -static void l2cap_sock_cleanup_listen(struct sock *parent) +static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data) { - struct sock *sk; - - BT_DBG("parent %p", parent); - - /* Close not yet accepted channels */ - while ((sk = bt_accept_dequeue(parent, NULL))) { - struct l2cap_chan *chan = l2cap_pi(sk)->chan; - - l2cap_chan_lock(chan); - __clear_chan_timer(chan); - l2cap_chan_close(chan, ECONNRESET); - l2cap_chan_unlock(chan); - - l2cap_sock_kill(sk); - } -} - -static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan) -{ - struct sock *sk, *parent = chan->data; - - /* Check for backlog size */ - if (sk_acceptq_is_full(parent)) { - BT_DBG("backlog full %d", parent->sk_ack_backlog); - return NULL; - } + struct sock *sk, *parent = data; sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC); @@ -934,10 +889,10 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan) return l2cap_pi(sk)->chan; } -static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) +static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb) { int err; - struct sock *sk = chan->data; + struct sock *sk = data; struct l2cap_pinfo *pi = l2cap_pi(sk); lock_sock(sk); @@ -970,57 +925,16 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) return err; } -static void l2cap_sock_close_cb(struct l2cap_chan *chan) +static void l2cap_sock_close_cb(void *data) { - struct sock *sk = chan->data; + struct sock *sk = data; l2cap_sock_kill(sk); } -static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err) +static void l2cap_sock_state_change_cb(void *data, int state) { - struct sock *sk = chan->data; - struct sock *parent; - - lock_sock(sk); - - parent = bt_sk(sk)->parent; - - sock_set_flag(sk, SOCK_ZAPPED); - - switch (chan->state) { - case BT_OPEN: - case BT_BOUND: - case BT_CLOSED: - break; - case BT_LISTEN: - l2cap_sock_cleanup_listen(sk); - sk->sk_state = BT_CLOSED; - chan->state = BT_CLOSED; - - break; - default: - sk->sk_state = BT_CLOSED; - chan->state = BT_CLOSED; - - sk->sk_err = err; - - if (parent) { - bt_accept_unlink(sk); - parent->sk_data_ready(parent, 0); - } else { - sk->sk_state_change(sk); - } - - break; - } - - release_sock(sk); -} - -static 
void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state) -{ - struct sock *sk = chan->data; + struct sock *sk = data; sk->sk_state = state; } @@ -1041,34 +955,12 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan, return skb; } -static void l2cap_sock_ready_cb(struct l2cap_chan *chan) -{ - struct sock *sk = chan->data; - struct sock *parent; - - lock_sock(sk); - - parent = bt_sk(sk)->parent; - - BT_DBG("sk %p, parent %p", sk, parent); - - sk->sk_state = BT_CONNECTED; - sk->sk_state_change(sk); - - if (parent) - parent->sk_data_ready(parent, 0); - - release_sock(sk); -} - static struct l2cap_ops l2cap_chan_ops = { .name = "L2CAP Socket Interface", .new_connection = l2cap_sock_new_connection_cb, .recv = l2cap_sock_recv_cb, .close = l2cap_sock_close_cb, - .teardown = l2cap_sock_teardown_cb, .state_change = l2cap_sock_state_change_cb, - .ready = l2cap_sock_ready_cb, .alloc_skb = l2cap_sock_alloc_skb_cb, }; diff --git a/trunk/net/bluetooth/lib.c b/trunk/net/bluetooth/lib.c index e1c97527e16c..506628876f36 100644 --- a/trunk/net/bluetooth/lib.c +++ b/trunk/net/bluetooth/lib.c @@ -26,7 +26,12 @@ #define pr_fmt(fmt) "Bluetooth: " fmt -#include +#include + +#include +#include +#include +#include #include diff --git a/trunk/net/bluetooth/mgmt.c b/trunk/net/bluetooth/mgmt.c index c72307cc25fc..25d220776079 100644 --- a/trunk/net/bluetooth/mgmt.c +++ b/trunk/net/bluetooth/mgmt.c @@ -24,6 +24,8 @@ /* Bluetooth HCI Management interface */ +#include +#include #include #include @@ -712,8 +714,7 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, } static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, - void (*cb)(struct pending_cmd *cmd, - void *data), + void (*cb)(struct pending_cmd *cmd, void *data), void *data) { struct list_head *p, *n; @@ -870,7 +871,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, } if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || - mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { + mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, MGMT_STATUS_BUSY); goto failed; @@ -977,7 +978,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, } if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || - mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { + mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, MGMT_STATUS_BUSY); goto failed; @@ -1000,7 +1001,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, scan = 0; if (test_bit(HCI_ISCAN, &hdev->flags) && - hdev->discov_timeout > 0) + hdev->discov_timeout > 0) cancel_delayed_work(&hdev->discov_off); } @@ -1055,7 +1056,7 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data, bool changed = false; if (!!cp->val != test_bit(HCI_LINK_SECURITY, - &hdev->dev_flags)) { + &hdev->dev_flags)) { change_bit(HCI_LINK_SECURITY, &hdev->dev_flags); changed = true; } @@ -1316,7 +1317,7 @@ static bool enable_service_cache(struct hci_dev *hdev) } static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, - u16 len) + u16 len) { struct mgmt_cp_remove_uuid *cp = data; struct pending_cmd *cmd; @@ -1441,7 +1442,7 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, } static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, - u16 len) + u16 len) { struct mgmt_cp_load_link_keys *cp = data; u16 
key_count, expected_len; @@ -1453,13 +1454,13 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, sizeof(struct mgmt_link_key_info); if (expected_len != len) { BT_ERR("load_link_keys: expected %u bytes, got %u bytes", - len, expected_len); + len, expected_len); return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, MGMT_STATUS_INVALID_PARAMS); } BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys, - key_count); + key_count); hci_dev_lock(hdev); @@ -1534,10 +1535,10 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, if (cp->disconnect) { if (cp->addr.type == BDADDR_BREDR) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, - &cp->addr.bdaddr); + &cp->addr.bdaddr); else conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, - &cp->addr.bdaddr); + &cp->addr.bdaddr); } else { conn = NULL; } @@ -1593,8 +1594,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, } if (cp->addr.type == BDADDR_BREDR) - conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, - &cp->addr.bdaddr); + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); else conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); @@ -1813,7 +1813,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data, hdev->io_capability = cp->io_capability; BT_DBG("%s IO capability set to 0x%02x", hdev->name, - hdev->io_capability); + hdev->io_capability); hci_dev_unlock(hdev); @@ -1821,7 +1821,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data, 0); } -static struct pending_cmd *find_pairing(struct hci_conn *conn) +static inline struct pending_cmd *find_pairing(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; struct pending_cmd *cmd; @@ -1873,22 +1873,6 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status) pairing_complete(cmd, mgmt_status(status)); } -static void le_connect_complete_cb(struct hci_conn *conn, u8 status) -{ - struct pending_cmd *cmd; - - BT_DBG("status %u", status); - - if (!status) - return; - - cmd = find_pairing(conn); - if (!cmd) - BT_DBG("Unable to find a pending command"); - else - pairing_complete(cmd, mgmt_status(status)); -} - static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -1927,15 +1911,8 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, rp.addr.type = cp->addr.type; if (IS_ERR(conn)) { - int status; - - if (PTR_ERR(conn) == -EBUSY) - status = MGMT_STATUS_BUSY; - else - status = MGMT_STATUS_CONNECT_FAILED; - err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, - status, &rp, + MGMT_STATUS_CONNECT_FAILED, &rp, sizeof(rp)); goto unlock; } @@ -1957,8 +1934,6 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, /* For LE, just connecting isn't a proof that the pairing finished */ if (cp->addr.type == BDADDR_BREDR) conn->connect_cfm_cb = pairing_complete_cb; - else - conn->connect_cfm_cb = le_connect_complete_cb; conn->security_cfm_cb = pairing_complete_cb; conn->disconn_cfm_cb = pairing_complete_cb; @@ -1966,7 +1941,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, cmd->user_data = conn; if (conn->state == BT_CONNECTED && - hci_conn_security(conn, sec_level, auth_type)) + hci_conn_security(conn, sec_level, auth_type)) pairing_complete(cmd, 0); err = 0; @@ -2263,7 +2238,7 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev, } static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev, - void *data, 
u16 len) + void *data, u16 len) { struct mgmt_cp_remove_remote_oob_data *cp = data; u8 status; @@ -2432,7 +2407,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data, case DISCOVERY_RESOLVING: e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, - NAME_PENDING); + NAME_PENDING); if (!e) { mgmt_pending_remove(cmd); err = cmd_complete(sk, hdev->id, @@ -2654,7 +2629,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, sizeof(struct mgmt_ltk_info); if (expected_len != len) { BT_ERR("load_keys: expected %u bytes, got %u bytes", - len, expected_len); + len, expected_len); return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, EINVAL); } @@ -2779,7 +2754,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) } if (opcode >= ARRAY_SIZE(mgmt_handlers) || - mgmt_handlers[opcode].func == NULL) { + mgmt_handlers[opcode].func == NULL) { BT_DBG("Unknown op %u", opcode); err = cmd_status(sk, index, opcode, MGMT_STATUS_UNKNOWN_COMMAND); @@ -2787,7 +2762,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) } if ((hdev && opcode < MGMT_OP_READ_INFO) || - (!hdev && opcode >= MGMT_OP_READ_INFO)) { + (!hdev && opcode >= MGMT_OP_READ_INFO)) { err = cmd_status(sk, index, opcode, MGMT_STATUS_INVALID_INDEX); goto done; @@ -2796,7 +2771,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen) handler = &mgmt_handlers[opcode]; if ((handler->var_len && len < handler->data_len) || - (!handler->var_len && len != handler->data_len)) { + (!handler->var_len && len != handler->data_len)) { err = cmd_status(sk, index, opcode, MGMT_STATUS_INVALID_PARAMS); goto done; @@ -2980,7 +2955,7 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, bacpy(&ev.key.addr.bdaddr, &key->bdaddr); ev.key.addr.type = BDADDR_BREDR; ev.key.type = key->type; - memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE); + memcpy(ev.key.val, key->val, 16); ev.key.pin_len = key->pin_len; return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL); @@ -3115,7 +3090,7 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, mgmt_pending_remove(cmd); mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, - hdev); + hdev); return err; } @@ -3205,7 +3180,7 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr, } int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 link_type, u8 addr_type) + u8 link_type, u8 addr_type) { struct mgmt_ev_user_passkey_request ev; @@ -3219,8 +3194,8 @@ int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr, } static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, - u8 link_type, u8 addr_type, u8 status, - u8 opcode) + u8 link_type, u8 addr_type, u8 status, + u8 opcode) { struct pending_cmd *cmd; struct mgmt_rp_user_confirm_reply rp; @@ -3251,8 +3226,7 @@ int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status) { return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, - status, - MGMT_OP_USER_CONFIRM_NEG_REPLY); + status, MGMT_OP_USER_CONFIRM_NEG_REPLY); } int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, @@ -3266,8 +3240,7 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, u8 addr_type, u8 status) { return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, - status, - MGMT_OP_USER_PASSKEY_NEG_REPLY); + status, 
MGMT_OP_USER_PASSKEY_NEG_REPLY); } int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, diff --git a/trunk/net/bluetooth/rfcomm/core.c b/trunk/net/bluetooth/rfcomm/core.c index c75107ef8920..8a602388f1e7 100644 --- a/trunk/net/bluetooth/rfcomm/core.c +++ b/trunk/net/bluetooth/rfcomm/core.c @@ -26,8 +26,22 @@ */ #include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include #include +#include + +#include +#include #include #include @@ -101,14 +115,14 @@ static void rfcomm_session_del(struct rfcomm_session *s); #define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1) #define __get_rpn_parity(line) (((line) >> 3) & 0x7) -static void rfcomm_schedule(void) +static inline void rfcomm_schedule(void) { if (!rfcomm_thread) return; wake_up_process(rfcomm_thread); } -static void rfcomm_session_put(struct rfcomm_session *s) +static inline void rfcomm_session_put(struct rfcomm_session *s) { if (atomic_dec_and_test(&s->refcnt)) rfcomm_session_del(s); @@ -213,7 +227,7 @@ static int rfcomm_l2sock_create(struct socket **sock) return err; } -static int rfcomm_check_security(struct rfcomm_dlc *d) +static inline int rfcomm_check_security(struct rfcomm_dlc *d) { struct sock *sk = d->session->sock->sk; struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn; @@ -1736,7 +1750,7 @@ static void rfcomm_process_connect(struct rfcomm_session *s) /* Send data queued for the DLC. * Return number of frames left in the queue. */ -static int rfcomm_process_tx(struct rfcomm_dlc *d) +static inline int rfcomm_process_tx(struct rfcomm_dlc *d) { struct sk_buff *skb; int err; @@ -1784,7 +1798,7 @@ static int rfcomm_process_tx(struct rfcomm_dlc *d) return skb_queue_len(&d->tx_queue); } -static void rfcomm_process_dlcs(struct rfcomm_session *s) +static inline void rfcomm_process_dlcs(struct rfcomm_session *s) { struct rfcomm_dlc *d; struct list_head *p, *n; @@ -1844,7 +1858,7 @@ static void rfcomm_process_dlcs(struct rfcomm_session *s) } } -static void rfcomm_process_rx(struct rfcomm_session *s) +static inline void rfcomm_process_rx(struct rfcomm_session *s) { struct socket *sock = s->sock; struct sock *sk = sock->sk; @@ -1869,7 +1883,7 @@ static void rfcomm_process_rx(struct rfcomm_session *s) } } -static void rfcomm_accept_connection(struct rfcomm_session *s) +static inline void rfcomm_accept_connection(struct rfcomm_session *s) { struct socket *sock = s->sock, *nsock; int err; @@ -1903,7 +1917,7 @@ static void rfcomm_accept_connection(struct rfcomm_session *s) sock_release(nsock); } -static void rfcomm_check_connection(struct rfcomm_session *s) +static inline void rfcomm_check_connection(struct rfcomm_session *s) { struct sock *sk = s->sock->sk; @@ -1927,7 +1941,7 @@ static void rfcomm_check_connection(struct rfcomm_session *s) } } -static void rfcomm_process_sessions(void) +static inline void rfcomm_process_sessions(void) { struct list_head *p, *n; diff --git a/trunk/net/bluetooth/rfcomm/sock.c b/trunk/net/bluetooth/rfcomm/sock.c index 7e1e59645c05..e8707debb864 100644 --- a/trunk/net/bluetooth/rfcomm/sock.c +++ b/trunk/net/bluetooth/rfcomm/sock.c @@ -25,8 +25,27 @@ * RFCOMM sockets. 
*/ -#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include + +#include #include #include diff --git a/trunk/net/bluetooth/rfcomm/tty.c b/trunk/net/bluetooth/rfcomm/tty.c index cb960773c002..d1820ff14aee 100644 --- a/trunk/net/bluetooth/rfcomm/tty.c +++ b/trunk/net/bluetooth/rfcomm/tty.c @@ -31,6 +31,11 @@ #include #include +#include +#include +#include +#include + #include #include #include @@ -127,7 +132,7 @@ static struct rfcomm_dev *__rfcomm_dev_get(int id) return NULL; } -static struct rfcomm_dev *rfcomm_dev_get(int id) +static inline struct rfcomm_dev *rfcomm_dev_get(int id) { struct rfcomm_dev *dev; @@ -340,7 +345,7 @@ static void rfcomm_wfree(struct sk_buff *skb) tty_port_put(&dev->port); } -static void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev) +static inline void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev) { tty_port_get(&dev->port); atomic_add(skb->truesize, &dev->wmem_alloc); diff --git a/trunk/net/bluetooth/sco.c b/trunk/net/bluetooth/sco.c index 40bbe25dcff7..cbdd313659a7 100644 --- a/trunk/net/bluetooth/sco.c +++ b/trunk/net/bluetooth/sco.c @@ -25,8 +25,26 @@ /* Bluetooth SCO sockets. */ #include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include +#include +#include +#include + +#include #include #include @@ -105,7 +123,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon) return conn; } -static struct sock *sco_chan_get(struct sco_conn *conn) +static inline struct sock *sco_chan_get(struct sco_conn *conn) { struct sock *sk = NULL; sco_conn_lock(conn); @@ -139,8 +157,7 @@ static int sco_conn_del(struct hci_conn *hcon, int err) return 0; } -static int sco_chan_add(struct sco_conn *conn, struct sock *sk, - struct sock *parent) +static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent) { int err = 0; @@ -211,7 +228,7 @@ static int sco_connect(struct sock *sk) return err; } -static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len) +static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len) { struct sco_conn *conn = sco_pi(sk)->conn; struct sk_buff *skb; @@ -237,7 +254,7 @@ static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len) return len; } -static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb) +static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb) { struct sock *sk = sco_chan_get(conn); @@ -506,7 +523,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen goto done; err = bt_sock_wait_state(sk, BT_CONNECTED, - sock_sndtimeo(sk, flags & O_NONBLOCK)); + sock_sndtimeo(sk, flags & O_NONBLOCK)); done: release_sock(sk); @@ -771,7 +788,7 @@ static int sco_sock_shutdown(struct socket *sock, int how) if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime) err = bt_sock_wait_state(sk, BT_CLOSED, - sk->sk_lingertime); + sk->sk_lingertime); } release_sock(sk); return err; @@ -861,7 +878,7 @@ static void sco_conn_ready(struct sco_conn *conn) bh_lock_sock(parent); sk = sco_sock_alloc(sock_net(parent), NULL, - BTPROTO_SCO, GFP_ATOMIC); + BTPROTO_SCO, GFP_ATOMIC); if (!sk) { bh_unlock_sock(parent); goto done; @@ -890,7 +907,7 @@ static void sco_conn_ready(struct sco_conn *conn) /* ----- SCO interface with lower layer (HCI) ----- */ int sco_connect_ind(struct hci_dev 
*hdev, bdaddr_t *bdaddr) { - struct sock *sk; + register struct sock *sk; struct hlist_node *node; int lm = 0; @@ -903,7 +920,7 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) continue; if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) || - !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { + !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) { lm |= HCI_LM_ACCEPT; break; } @@ -964,7 +981,7 @@ static int sco_debugfs_show(struct seq_file *f, void *p) sk_for_each(sk, node, &sco_sk_list.head) { seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src), - batostr(&bt_sk(sk)->dst), sk->sk_state); + batostr(&bt_sk(sk)->dst), sk->sk_state); } read_unlock(&sco_sk_list.lock); @@ -1027,8 +1044,8 @@ int __init sco_init(void) } if (bt_debugfs) { - sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs, - NULL, &sco_debugfs_fops); + sco_debugfs = debugfs_create_file("sco", 0444, + bt_debugfs, NULL, &sco_debugfs_fops); if (!sco_debugfs) BT_ERR("Failed to create SCO debug file"); } diff --git a/trunk/net/bluetooth/smp.c b/trunk/net/bluetooth/smp.c index 16ef0dc85a0a..6fc7c4708f3e 100644 --- a/trunk/net/bluetooth/smp.c +++ b/trunk/net/bluetooth/smp.c @@ -20,15 +20,14 @@ SOFTWARE IS DISCLAIMED. */ -#include -#include -#include - #include #include #include #include #include +#include +#include +#include #define SMP_TIMEOUT msecs_to_jiffies(30000) @@ -649,7 +648,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM; - ret = tk_request(conn, 0, auth, req->io_capability, rsp->io_capability); + ret = tk_request(conn, 0, auth, rsp->io_capability, req->io_capability); if (ret) return SMP_UNSPECIFIED; @@ -704,7 +703,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) return 0; } -static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level) +static u8 smp_ltk_encrypt(struct l2cap_conn *conn) { struct smp_ltk *key; struct hci_conn *hcon = conn->hcon; @@ -713,9 +712,6 @@ static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level) if (!key) return 0; - if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated) - return 0; - if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) return 1; @@ -736,7 +732,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req); - if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) + if (smp_ltk_encrypt(conn)) return 0; if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) @@ -775,7 +771,7 @@ int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level) return 1; if (hcon->link_mode & HCI_LM_MASTER) - if (smp_ltk_encrypt(conn, sec_level)) + if (smp_ltk_encrypt(conn)) goto done; if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) diff --git a/trunk/net/can/af_can.c b/trunk/net/can/af_can.c index 821022a7214f..6efcd37b4bd0 100644 --- a/trunk/net/can/af_can.c +++ b/trunk/net/can/af_can.c @@ -41,7 +41,6 @@ */ #include -#include #include #include #include @@ -221,46 +220,30 @@ static int can_create(struct net *net, struct socket *sock, int protocol, * -ENOBUFS on full driver queue (see net_xmit_errno()) * -ENOMEM when local loopback failed at calling skb_clone() * -EPERM when trying to send on a non-CAN interface - * -EMSGSIZE CAN frame size is bigger than CAN interface MTU * -EINVAL when the skb->data does not contain a valid CAN frame */ int can_send(struct sk_buff *skb, int loop) { struct sk_buff *newskb = NULL; - struct canfd_frame *cfd = (struct canfd_frame *)skb->data; - int err = 
-EINVAL;
-
-	if (skb->len == CAN_MTU) {
-		skb->protocol = htons(ETH_P_CAN);
-		if (unlikely(cfd->len > CAN_MAX_DLEN))
-			goto inval_skb;
-	} else if (skb->len == CANFD_MTU) {
-		skb->protocol = htons(ETH_P_CANFD);
-		if (unlikely(cfd->len > CANFD_MAX_DLEN))
-			goto inval_skb;
-	} else
-		goto inval_skb;
+	struct can_frame *cf = (struct can_frame *)skb->data;
+	int err;
 
-	/*
-	 * Make sure the CAN frame can pass the selected CAN netdevice.
-	 * As structs can_frame and canfd_frame are similar, we can provide
-	 * CAN FD frames to legacy CAN drivers as long as the length is <= 8
-	 */
-	if (unlikely(skb->len > skb->dev->mtu && cfd->len > CAN_MAX_DLEN)) {
-		err = -EMSGSIZE;
-		goto inval_skb;
+	if (skb->len != sizeof(struct can_frame) || cf->can_dlc > 8) {
+		kfree_skb(skb);
+		return -EINVAL;
 	}
 
-	if (unlikely(skb->dev->type != ARPHRD_CAN)) {
-		err = -EPERM;
-		goto inval_skb;
+	if (skb->dev->type != ARPHRD_CAN) {
+		kfree_skb(skb);
+		return -EPERM;
 	}
 
-	if (unlikely(!(skb->dev->flags & IFF_UP))) {
-		err = -ENETDOWN;
-		goto inval_skb;
+	if (!(skb->dev->flags & IFF_UP)) {
+		kfree_skb(skb);
+		return -ENETDOWN;
 	}
 
+	skb->protocol = htons(ETH_P_CAN);
 	skb_reset_network_header(skb);
 	skb_reset_transport_header(skb);
 
@@ -317,10 +300,6 @@ int can_send(struct sk_buff *skb, int loop)
 	can_stats.tx_frames_delta++;
 
 	return 0;
-
-inval_skb:
-	kfree_skb(skb);
-	return err;
 }
 EXPORT_SYMBOL(can_send);
 
@@ -653,11 +632,24 @@ static int can_rcv_filter(struct dev_rcv_lists *d, struct sk_buff *skb)
 	return matches;
 }
 
-static void can_receive(struct sk_buff *skb, struct net_device *dev)
+static int can_rcv(struct sk_buff *skb, struct net_device *dev,
+		   struct packet_type *pt, struct net_device *orig_dev)
 {
 	struct dev_rcv_lists *d;
+	struct can_frame *cf = (struct can_frame *)skb->data;
 	int matches;
 
+	if (!net_eq(dev_net(dev), &init_net))
+		goto drop;
+
+	if (WARN_ONCE(dev->type != ARPHRD_CAN ||
+		      skb->len != sizeof(struct can_frame) ||
+		      cf->can_dlc > 8,
+		      "PF_CAN: dropped non conform skbuf: "
+		      "dev type %d, len %d, can_dlc %d\n",
+		      dev->type, skb->len, cf->can_dlc))
+		goto drop;
+
 	/* update statistics */
 	can_stats.rx_frames++;
 	can_stats.rx_frames_delta++;
@@ -681,49 +673,7 @@ static void can_receive(struct sk_buff *skb, struct net_device *dev)
 		can_stats.matches++;
 		can_stats.matches_delta++;
 	}
-}
-
-static int can_rcv(struct sk_buff *skb, struct net_device *dev,
-		   struct packet_type *pt, struct net_device *orig_dev)
-{
-	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
-
-	if (unlikely(!net_eq(dev_net(dev), &init_net)))
-		goto drop;
-
-	if (WARN_ONCE(dev->type != ARPHRD_CAN ||
-		      skb->len != CAN_MTU ||
-		      cfd->len > CAN_MAX_DLEN,
-		      "PF_CAN: dropped non conform CAN skbuf: "
-		      "dev type %d, len %d, datalen %d\n",
-		      dev->type, skb->len, cfd->len))
-		goto drop;
-
-	can_receive(skb, dev);
-	return NET_RX_SUCCESS;
-
-drop:
-	kfree_skb(skb);
-	return NET_RX_DROP;
-}
-
-static int canfd_rcv(struct sk_buff *skb, struct net_device *dev,
-		     struct packet_type *pt, struct net_device *orig_dev)
-{
-	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
-
-	if (unlikely(!net_eq(dev_net(dev), &init_net)))
-		goto drop;
-
-	if (WARN_ONCE(dev->type != ARPHRD_CAN ||
-		      skb->len != CANFD_MTU ||
-		      cfd->len > CANFD_MAX_DLEN,
-		      "PF_CAN: dropped non conform CAN FD skbuf: "
-		      "dev type %d, len %d, datalen %d\n",
-		      dev->type, skb->len, cfd->len))
-		goto drop;
-
-	can_receive(skb, dev);
 	return NET_RX_SUCCESS;
 
 drop:
@@ -857,14 +807,10 @@ static int can_notifier(struct notifier_block *nb, unsigned long msg,
 
 static struct packet_type can_packet __read_mostly = {
 	.type = cpu_to_be16(ETH_P_CAN),
+	.dev  = NULL,
 	.func = can_rcv,
 };
 
-static struct packet_type canfd_packet __read_mostly = {
-	.type = cpu_to_be16(ETH_P_CANFD),
-	.func = canfd_rcv,
-};
-
 static const struct net_proto_family can_family_ops = {
 	.family = PF_CAN,
 	.create = can_create,
@@ -878,12 +824,6 @@ static struct notifier_block can_netdev_notifier __read_mostly = {
 
 static __init int can_init(void)
 {
-	/* check for correct padding to be able to use the structs similarly */
-	BUILD_BUG_ON(offsetof(struct can_frame, can_dlc) !=
-		     offsetof(struct canfd_frame, len) ||
-		     offsetof(struct can_frame, data) !=
-		     offsetof(struct canfd_frame, data));
-
 	printk(banner);
 
 	memset(&can_rx_alldev_list, 0, sizeof(can_rx_alldev_list));
@@ -906,7 +846,6 @@ static __init int can_init(void)
 	sock_register(&can_family_ops);
 	register_netdevice_notifier(&can_netdev_notifier);
 	dev_add_pack(&can_packet);
-	dev_add_pack(&canfd_packet);
 
 	return 0;
 }
@@ -921,7 +860,6 @@ static __exit void can_exit(void)
 	can_remove_proc();
 
 	/* protocol unregister */
-	dev_remove_pack(&canfd_packet);
 	dev_remove_pack(&can_packet);
 	unregister_netdevice_notifier(&can_netdev_notifier);
 	sock_unregister(PF_CAN);
diff --git a/trunk/net/can/raw.c b/trunk/net/can/raw.c
index 3e9c89356a93..46cca3a91d19 100644
--- a/trunk/net/can/raw.c
+++ b/trunk/net/can/raw.c
@@ -82,7 +82,6 @@ struct raw_sock {
 	struct notifier_block notifier;
 	int loopback;
 	int recv_own_msgs;
-	int fd_frames;
 	int count; /* number of active filters */
 	struct can_filter dfilter; /* default/single filter */
 	struct can_filter *filter; /* pointer to filter(s) */
@@ -120,14 +119,6 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
 	if (!ro->recv_own_msgs && oskb->sk == sk)
 		return;
 
-	/* do not pass frames with DLC > 8 to a legacy socket */
-	if (!ro->fd_frames) {
-		struct canfd_frame *cfd = (struct canfd_frame *)oskb->data;
-
-		if (unlikely(cfd->len > CAN_MAX_DLEN))
-			return;
-	}
-
	/* clone the given skb to be able to enqueue it into the rcv queue */
 	skb = skb_clone(oskb, GFP_ATOMIC);
 	if (!skb)
@@ -300,7 +291,6 @@ static int raw_init(struct sock *sk)
 	/* set default loopback behaviour */
 	ro->loopback = 1;
 	ro->recv_own_msgs = 0;
-	ro->fd_frames = 0;
 
 	/* set notifier */
 	ro->notifier.notifier_call = raw_notifier;
@@ -579,15 +569,6 @@ static int raw_setsockopt(struct socket *sock, int level, int optname,
 
 		break;
 
-	case CAN_RAW_FD_FRAMES:
-		if (optlen != sizeof(ro->fd_frames))
-			return -EINVAL;
-
-		if (copy_from_user(&ro->fd_frames, optval, optlen))
-			return -EFAULT;
-
-		break;
-
 	default:
 		return -ENOPROTOOPT;
 	}
@@ -646,12 +627,6 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
 		val = &ro->recv_own_msgs;
 		break;
 
-	case CAN_RAW_FD_FRAMES:
-		if (len > sizeof(int))
-			len = sizeof(int);
-		val = &ro->fd_frames;
-		break;
-
 	default:
 		return -ENOPROTOOPT;
 	}
@@ -687,13 +662,8 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
 	} else
 		ifindex = ro->ifindex;
 
-	if (ro->fd_frames) {
-		if (unlikely(size != CANFD_MTU && size != CAN_MTU))
-			return -EINVAL;
-	} else {
-		if (unlikely(size != CAN_MTU))
-			return -EINVAL;
-	}
+	if (size != sizeof(struct can_frame))
+		return -EINVAL;
 
 	dev = dev_get_by_index(&init_net, ifindex);
 	if (!dev)
@@ -735,9 +705,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
 		       struct msghdr *msg, size_t size, int flags)
 {
 	struct sock *sk = sock->sk;
-	struct raw_sock *ro = raw_sk(sk);
 	struct sk_buff *skb;
-	int rxmtu;
 	int err = 0;
 	int noblock;
 
@@ -748,20 +716,10 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (!skb)
 		return err;
 
-	/*
-	 * when serving a legacy socket the DLC <= 8 is already checked inside
-	 * raw_rcv(). Now check if we need to pass a canfd_frame to a legacy
-	 * socket and cut the possible CANFD_MTU/CAN_MTU length to CAN_MTU
-	 */
-	if (!ro->fd_frames)
-		rxmtu = CAN_MTU;
-	else
-		rxmtu = skb->len;
-
-	if (size < rxmtu)
+	if (size < skb->len)
 		msg->msg_flags |= MSG_TRUNC;
 	else
-		size = rxmtu;
+		size = skb->len;
 
 	err = memcpy_toiovec(msg->msg_iov, skb->data, size);
 	if (err < 0) {
diff --git a/trunk/net/core/sock.c b/trunk/net/core/sock.c
index 929bdcc2383b..9e5b71fda6ec 100644
--- a/trunk/net/core/sock.c
+++ b/trunk/net/core/sock.c
@@ -1465,11 +1465,6 @@ void sock_rfree(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(sock_rfree);
 
-void sock_edemux(struct sk_buff *skb)
-{
-	sock_put(skb->sk);
-}
-EXPORT_SYMBOL(sock_edemux);
 
 int sock_i_uid(struct sock *sk)
 {
diff --git a/trunk/net/dcb/dcbnl.c b/trunk/net/dcb/dcbnl.c
index 013da86575e8..0a360072cfec 100644
--- a/trunk/net/dcb/dcbnl.c
+++ b/trunk/net/dcb/dcbnl.c
@@ -852,7 +852,8 @@ static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
 		}
 	}
 
-	return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0);
+	return nla_put_u8(skb,
+			  (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG), 0);
 }
 
 static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
diff --git a/trunk/net/dccp/ipv4.c b/trunk/net/dccp/ipv4.c
index 3eb76b5f221a..07f5579ca756 100644
--- a/trunk/net/dccp/ipv4.c
+++ b/trunk/net/dccp/ipv4.c
@@ -504,7 +504,7 @@ static int dccp_v4_send_response(struct sock *sk, struct request_sock *req,
 	struct dst_entry *dst;
 	struct flowi4 fl4;
 
-	dst = inet_csk_route_req(sk, &fl4, req, false);
+	dst = inet_csk_route_req(sk, &fl4, req);
 	if (dst == NULL)
 		goto out;
diff --git a/trunk/net/ipv4/af_inet.c b/trunk/net/ipv4/af_inet.c
index 07a02f6e9696..e4e8e00a2c91 100644
--- a/trunk/net/ipv4/af_inet.c
+++ b/trunk/net/ipv4/af_inet.c
@@ -157,7 +157,6 @@ void inet_sock_destruct(struct sock *sk)
 
 	kfree(rcu_dereference_protected(inet->inet_opt, 1));
 	dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
-	dst_release(sk->sk_rx_dst);
 	sk_refcnt_debug_dec(sk);
 }
 EXPORT_SYMBOL(inet_sock_destruct);
@@ -243,18 +242,20 @@ void build_ehash_secret(void)
 }
 EXPORT_SYMBOL(build_ehash_secret);
 
-static inline int inet_netns_ok(struct net *net, __u8 protocol)
+static inline int inet_netns_ok(struct net *net, int protocol)
 {
+	int hash;
 	const struct net_protocol *ipprot;
 
 	if (net_eq(net, &init_net))
 		return 1;
 
-	ipprot = rcu_dereference(inet_protos[protocol]);
-	if (ipprot == NULL) {
+	hash = protocol & (MAX_INET_PROTOS - 1);
+	ipprot = rcu_dereference(inet_protos[hash]);
+
+	if (ipprot == NULL)
 		/* raw IP is OK */
 		return 1;
-	}
 	return ipprot->netns_ok;
 }
@@ -1215,8 +1216,8 @@ EXPORT_SYMBOL(inet_sk_rebuild_header);
 
 static int inet_gso_send_check(struct sk_buff *skb)
 {
-	const struct net_protocol *ops;
 	const struct iphdr *iph;
+	const struct net_protocol *ops;
 	int proto;
 	int ihl;
 	int err = -EINVAL;
@@ -1235,7 +1236,7 @@ static int inet_gso_send_check(struct sk_buff *skb)
 	__skb_pull(skb, ihl);
 	skb_reset_transport_header(skb);
 	iph = ip_hdr(skb);
-	proto = iph->protocol;
+	proto = iph->protocol & (MAX_INET_PROTOS - 1);
 	err = -EPROTONOSUPPORT;
 
 	rcu_read_lock();
@@ -1252,8 +1253,8 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
 	netdev_features_t features)
 {
 	struct sk_buff *segs = ERR_PTR(-EINVAL);
-	const struct net_protocol *ops;
 	struct iphdr *iph;
+	const struct net_protocol *ops;
 	int proto;
 	int ihl;
 	int id;
@@ -1285,7 +1286,7 @@ static struct
sk_buff *inet_gso_segment(struct sk_buff *skb, skb_reset_transport_header(skb); iph = ip_hdr(skb); id = ntohs(iph->id); - proto = iph->protocol; + proto = iph->protocol & (MAX_INET_PROTOS - 1); segs = ERR_PTR(-EPROTONOSUPPORT); rcu_read_lock(); @@ -1339,7 +1340,7 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head, goto out; } - proto = iph->protocol; + proto = iph->protocol & (MAX_INET_PROTOS - 1); rcu_read_lock(); ops = rcu_dereference(inet_protos[proto]); @@ -1397,11 +1398,11 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head, static int inet_gro_complete(struct sk_buff *skb) { - __be16 newlen = htons(skb->len - skb_network_offset(skb)); - struct iphdr *iph = ip_hdr(skb); const struct net_protocol *ops; - int proto = iph->protocol; + struct iphdr *iph = ip_hdr(skb); + int proto = iph->protocol & (MAX_INET_PROTOS - 1); int err = -ENOSYS; + __be16 newlen = htons(skb->len - skb_network_offset(skb)); csum_replace2(&iph->check, iph->tot_len, newlen); iph->tot_len = newlen; @@ -1519,15 +1520,14 @@ static const struct net_protocol igmp_protocol = { #endif static const struct net_protocol tcp_protocol = { - .early_demux = tcp_v4_early_demux, - .handler = tcp_v4_rcv, - .err_handler = tcp_v4_err, - .gso_send_check = tcp_v4_gso_send_check, - .gso_segment = tcp_tso_segment, - .gro_receive = tcp4_gro_receive, - .gro_complete = tcp4_gro_complete, - .no_policy = 1, - .netns_ok = 1, + .handler = tcp_v4_rcv, + .err_handler = tcp_v4_err, + .gso_send_check = tcp_v4_gso_send_check, + .gso_segment = tcp_tso_segment, + .gro_receive = tcp4_gro_receive, + .gro_complete = tcp4_gro_complete, + .no_policy = 1, + .netns_ok = 1, }; static const struct net_protocol udp_protocol = { diff --git a/trunk/net/ipv4/icmp.c b/trunk/net/ipv4/icmp.c index 49a74cc79dc8..e1caa1abe5d1 100644 --- a/trunk/net/ipv4/icmp.c +++ b/trunk/net/ipv4/icmp.c @@ -637,12 +637,12 @@ EXPORT_SYMBOL(icmp_send); static void icmp_unreach(struct sk_buff *skb) { - const struct net_protocol *ipprot; const struct iphdr *iph; struct icmphdr *icmph; - struct net *net; + int hash, protocol; + const struct net_protocol *ipprot; u32 info = 0; - int protocol; + struct net *net; net = dev_net(skb_dst(skb)->dev); @@ -731,8 +731,9 @@ static void icmp_unreach(struct sk_buff *skb) */ raw_icmp_error(skb, protocol, info); + hash = protocol & (MAX_INET_PROTOS - 1); rcu_read_lock(); - ipprot = rcu_dereference(inet_protos[protocol]); + ipprot = rcu_dereference(inet_protos[hash]); if (ipprot && ipprot->err_handler) ipprot->err_handler(skb, info); rcu_read_unlock(); diff --git a/trunk/net/ipv4/inet_connection_sock.c b/trunk/net/ipv4/inet_connection_sock.c index 034ddbe42adf..f9ee7417f6a0 100644 --- a/trunk/net/ipv4/inet_connection_sock.c +++ b/trunk/net/ipv4/inet_connection_sock.c @@ -368,21 +368,17 @@ EXPORT_SYMBOL(inet_csk_reset_keepalive_timer); struct dst_entry *inet_csk_route_req(struct sock *sk, struct flowi4 *fl4, - const struct request_sock *req, - bool nocache) + const struct request_sock *req) { struct rtable *rt; const struct inet_request_sock *ireq = inet_rsk(req); struct ip_options_rcu *opt = inet_rsk(req)->opt; struct net *net = sock_net(sk); - int flags = inet_sk_flowi_flags(sk) & ~FLOWI_FLAG_PRECOW_METRICS; - if (nocache) - flags |= FLOWI_FLAG_RT_NOCACHE; flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, sk->sk_protocol, - flags, + inet_sk_flowi_flags(sk) & ~FLOWI_FLAG_PRECOW_METRICS, (opt && opt->opt.srr) ? 
opt->opt.faddr : ireq->rmt_addr, ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport); security_req_classify_flow(req, flowi4_to_flowi(fl4)); diff --git a/trunk/net/ipv4/inetpeer.c b/trunk/net/ipv4/inetpeer.c index da90a8cab614..cac02ad1425d 100644 --- a/trunk/net/ipv4/inetpeer.c +++ b/trunk/net/ipv4/inetpeer.c @@ -126,7 +126,7 @@ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min static void inetpeer_gc_worker(struct work_struct *work) { - struct inet_peer *p, *n, *c; + struct inet_peer *p, *n; LIST_HEAD(list); spin_lock_bh(&gc_lock); @@ -138,19 +138,17 @@ static void inetpeer_gc_worker(struct work_struct *work) list_for_each_entry_safe(p, n, &list, gc_list) { - if (need_resched()) + if(need_resched()) cond_resched(); - c = rcu_dereference_protected(p->avl_left, 1); - if (c != peer_avl_empty) { - list_add_tail(&c->gc_list, &list); - p->avl_left = peer_avl_empty_rcu; + if (p->avl_left != peer_avl_empty) { + list_add_tail(&p->avl_left->gc_list, &list); + p->avl_left = peer_avl_empty; } - c = rcu_dereference_protected(p->avl_right, 1); - if (c != peer_avl_empty) { - list_add_tail(&c->gc_list, &list); - p->avl_right = peer_avl_empty_rcu; + if (p->avl_right != peer_avl_empty) { + list_add_tail(&p->avl_right->gc_list, &list); + p->avl_right = peer_avl_empty; } n = list_entry(p->gc_list.next, struct inet_peer, gc_list); @@ -589,17 +587,23 @@ static void inetpeer_inval_rcu(struct rcu_head *head) void inetpeer_invalidate_tree(struct inet_peer_base *base) { - struct inet_peer *root; + struct inet_peer *old, *new, *prev; write_seqlock_bh(&base->lock); - root = rcu_deref_locked(base->root, base); - if (root != peer_avl_empty) { - base->root = peer_avl_empty_rcu; + old = base->root; + if (old == peer_avl_empty_rcu) + goto out; + + new = peer_avl_empty_rcu; + + prev = cmpxchg(&base->root, old, new); + if (prev == old) { base->total = 0; - call_rcu(&root->gc_rcu, inetpeer_inval_rcu); + call_rcu(&prev->gc_rcu, inetpeer_inval_rcu); } +out: write_sequnlock_bh(&base->lock); } EXPORT_SYMBOL(inetpeer_invalidate_tree); diff --git a/trunk/net/ipv4/ip_input.c b/trunk/net/ipv4/ip_input.c index bca25179cdb9..8590144ca330 100644 --- a/trunk/net/ipv4/ip_input.c +++ b/trunk/net/ipv4/ip_input.c @@ -198,13 +198,14 @@ static int ip_local_deliver_finish(struct sk_buff *skb) rcu_read_lock(); { int protocol = ip_hdr(skb)->protocol; + int hash, raw; const struct net_protocol *ipprot; - int raw; resubmit: raw = raw_local_deliver(skb, protocol); - ipprot = rcu_dereference(inet_protos[protocol]); + hash = protocol & (MAX_INET_PROTOS - 1); + ipprot = rcu_dereference(inet_protos[hash]); if (ipprot != NULL) { int ret; @@ -313,8 +314,6 @@ static inline bool ip_rcv_options(struct sk_buff *skb) return true; } -int sysctl_ip_early_demux __read_mostly = 1; - static int ip_rcv_finish(struct sk_buff *skb) { const struct iphdr *iph = ip_hdr(skb); @@ -325,34 +324,19 @@ static int ip_rcv_finish(struct sk_buff *skb) * how the packet travels inside Linux networking. 
*/ if (skb_dst(skb) == NULL) { - int err = -ENOENT; - - if (sysctl_ip_early_demux) { - const struct net_protocol *ipprot; - int protocol = iph->protocol; - - rcu_read_lock(); - ipprot = rcu_dereference(inet_protos[protocol]); - if (ipprot && ipprot->early_demux) - err = ipprot->early_demux(skb); - rcu_read_unlock(); - } - - if (err) { - err = ip_route_input_noref(skb, iph->daddr, iph->saddr, - iph->tos, skb->dev); - if (unlikely(err)) { - if (err == -EHOSTUNREACH) - IP_INC_STATS_BH(dev_net(skb->dev), - IPSTATS_MIB_INADDRERRORS); - else if (err == -ENETUNREACH) - IP_INC_STATS_BH(dev_net(skb->dev), - IPSTATS_MIB_INNOROUTES); - else if (err == -EXDEV) - NET_INC_STATS_BH(dev_net(skb->dev), - LINUX_MIB_IPRPFILTER); - goto drop; - } + int err = ip_route_input_noref(skb, iph->daddr, iph->saddr, + iph->tos, skb->dev); + if (unlikely(err)) { + if (err == -EHOSTUNREACH) + IP_INC_STATS_BH(dev_net(skb->dev), + IPSTATS_MIB_INADDRERRORS); + else if (err == -ENETUNREACH) + IP_INC_STATS_BH(dev_net(skb->dev), + IPSTATS_MIB_INNOROUTES); + else if (err == -EXDEV) + NET_INC_STATS_BH(dev_net(skb->dev), + LINUX_MIB_IPRPFILTER); + goto drop; } } diff --git a/trunk/net/ipv4/protocol.c b/trunk/net/ipv4/protocol.c index 8918eff1426d..9ae5c01cd0b2 100644 --- a/trunk/net/ipv4/protocol.c +++ b/trunk/net/ipv4/protocol.c @@ -36,7 +36,9 @@ const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS] __read_mostly; int inet_add_protocol(const struct net_protocol *prot, unsigned char protocol) { - return !cmpxchg((const struct net_protocol **)&inet_protos[protocol], + int hash = protocol & (MAX_INET_PROTOS - 1); + + return !cmpxchg((const struct net_protocol **)&inet_protos[hash], NULL, prot) ? 0 : -1; } EXPORT_SYMBOL(inet_add_protocol); @@ -47,9 +49,9 @@ EXPORT_SYMBOL(inet_add_protocol); int inet_del_protocol(const struct net_protocol *prot, unsigned char protocol) { - int ret; + int ret, hash = protocol & (MAX_INET_PROTOS - 1); - ret = (cmpxchg((const struct net_protocol **)&inet_protos[protocol], + ret = (cmpxchg((const struct net_protocol **)&inet_protos[hash], prot, NULL) == prot) ? 0 : -1; synchronize_net(); diff --git a/trunk/net/ipv4/route.c b/trunk/net/ipv4/route.c index 8d62d85e68dc..a91f6d33804c 100644 --- a/trunk/net/ipv4/route.c +++ b/trunk/net/ipv4/route.c @@ -1156,7 +1156,7 @@ static struct rtable *rt_intern_hash(unsigned int hash, struct rtable *rt, candp = NULL; now = jiffies; - if (!rt_caching(dev_net(rt->dst.dev)) || (rt->dst.flags & DST_NOCACHE)) { + if (!rt_caching(dev_net(rt->dst.dev))) { /* * If we're not caching, just tell the caller we * were successful and don't touch the route. 
The @@ -2582,9 +2582,6 @@ static struct rtable *__mkroute_output(const struct fib_result *res, rt_set_nexthop(rth, fl4, res, fi, type, 0); - if (fl4->flowi4_flags & FLOWI_FLAG_RT_NOCACHE) - rth->dst.flags |= DST_NOCACHE; - return rth; } diff --git a/trunk/net/ipv4/sysctl_net_ipv4.c b/trunk/net/ipv4/sysctl_net_ipv4.c index 12aa0c5867c4..ef32956ed655 100644 --- a/trunk/net/ipv4/sysctl_net_ipv4.c +++ b/trunk/net/ipv4/sysctl_net_ipv4.c @@ -300,13 +300,6 @@ static struct ctl_table ipv4_table[] = { .mode = 0644, .proc_handler = proc_dointvec }, - { - .procname = "ip_early_demux", - .data = &sysctl_ip_early_demux, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec - }, { .procname = "ip_dynaddr", .data = &sysctl_ip_dynaddr, diff --git a/trunk/net/ipv4/tcp_input.c b/trunk/net/ipv4/tcp_input.c index 8416f8a68e65..b224eb8bce8b 100644 --- a/trunk/net/ipv4/tcp_input.c +++ b/trunk/net/ipv4/tcp_input.c @@ -5518,18 +5518,6 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, struct tcp_sock *tp = tcp_sk(sk); int res; - if (sk->sk_rx_dst) { - struct dst_entry *dst = sk->sk_rx_dst; - if (unlikely(dst->obsolete)) { - if (dst->ops->check(dst, 0) == NULL) { - dst_release(dst); - sk->sk_rx_dst = NULL; - } - } - } - if (unlikely(sk->sk_rx_dst == NULL)) - sk->sk_rx_dst = dst_clone(skb_dst(skb)); - /* * Header prediction. * The code loosely follows the one in the famous @@ -5741,10 +5729,8 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) tcp_set_state(sk, TCP_ESTABLISHED); - if (skb != NULL) { - sk->sk_rx_dst = dst_clone(skb_dst(skb)); + if (skb != NULL) security_inet_conn_established(sk, skb); - } /* Make sure socket is routed, for correct metrics. */ icsk->icsk_af_ops->rebuild_header(sk); diff --git a/trunk/net/ipv4/tcp_ipv4.c b/trunk/net/ipv4/tcp_ipv4.c index b52934f5334e..fda2ca17135e 100644 --- a/trunk/net/ipv4/tcp_ipv4.c +++ b/trunk/net/ipv4/tcp_ipv4.c @@ -825,8 +825,7 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb, static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, struct request_sock *req, struct request_values *rvp, - u16 queue_mapping, - bool nocache) + u16 queue_mapping) { const struct inet_request_sock *ireq = inet_rsk(req); struct flowi4 fl4; @@ -834,7 +833,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, struct sk_buff * skb; /* First, grab a route. 
*/ - if (!dst && (dst = inet_csk_route_req(sk, &fl4, req, nocache)) == NULL) + if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) return -1; skb = tcp_make_synack(sk, dst, req, rvp); @@ -856,7 +855,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req, struct request_values *rvp) { TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); - return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false); + return tcp_v4_send_synack(sk, NULL, req, rvp, 0); } /* @@ -1389,7 +1388,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) */ if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle && - (dst = inet_csk_route_req(sk, &fl4, req, want_cookie)) != NULL && + (dst = inet_csk_route_req(sk, &fl4, req)) != NULL && fl4.daddr == saddr && (peer = rt_get_peer((struct rtable *)dst, fl4.daddr)) != NULL) { inet_peer_refcheck(peer); @@ -1425,8 +1424,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) if (tcp_v4_send_synack(sk, dst, req, (struct request_values *)&tmp_ext, - skb_get_queue_mapping(skb), - want_cookie) || + skb_get_queue_mapping(skb)) || want_cookie) goto drop_and_free; @@ -1673,58 +1671,6 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) } EXPORT_SYMBOL(tcp_v4_do_rcv); -int tcp_v4_early_demux(struct sk_buff *skb) -{ - struct net *net = dev_net(skb->dev); - const struct iphdr *iph; - const struct tcphdr *th; - struct net_device *dev; - struct sock *sk; - int err; - - err = -ENOENT; - if (skb->pkt_type != PACKET_HOST) - goto out_err; - - if (!pskb_may_pull(skb, ip_hdrlen(skb) + sizeof(struct tcphdr))) - goto out_err; - - iph = ip_hdr(skb); - th = (struct tcphdr *) ((char *)iph + ip_hdrlen(skb)); - - if (th->doff < sizeof(struct tcphdr) / 4) - goto out_err; - - if (!pskb_may_pull(skb, ip_hdrlen(skb) + th->doff * 4)) - goto out_err; - - dev = skb->dev; - sk = __inet_lookup_established(net, &tcp_hashinfo, - iph->saddr, th->source, - iph->daddr, th->dest, - dev->ifindex); - if (sk) { - skb->sk = sk; - skb->destructor = sock_edemux; - if (sk->sk_state != TCP_TIME_WAIT) { - struct dst_entry *dst = sk->sk_rx_dst; - if (dst) - dst = dst_check(dst, 0); - if (dst) { - struct rtable *rt = (struct rtable *) dst; - - if (rt->rt_iif == dev->ifindex) { - skb_dst_set_noref(skb, dst); - err = 0; - } - } - } - } - -out_err: - return err; -} - /* * From tcp_input.c */ diff --git a/trunk/net/ipv4/tcp_minisocks.c b/trunk/net/ipv4/tcp_minisocks.c index 72b7c63b1a39..cb015317c9f7 100644 --- a/trunk/net/ipv4/tcp_minisocks.c +++ b/trunk/net/ipv4/tcp_minisocks.c @@ -445,8 +445,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct tcp_sock *oldtp = tcp_sk(sk); struct tcp_cookie_values *oldcvp = oldtp->cookie_values; - newsk->sk_rx_dst = dst_clone(skb_dst(skb)); - /* TCP Cookie Transactions require space for the cookie pair, * as it differs for each connection. There is no need to * copy any s_data_payload stored at the original socket. 
diff --git a/trunk/net/ipv6/icmp.c b/trunk/net/ipv6/icmp.c index c7da1422cbde..5247d5c211f9 100644 --- a/trunk/net/ipv6/icmp.c +++ b/trunk/net/ipv6/icmp.c @@ -600,8 +600,9 @@ static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info) { const struct inet6_protocol *ipprot; int inner_offset; - __be16 frag_off; + int hash; u8 nexthdr; + __be16 frag_off; if (!pskb_may_pull(skb, sizeof(struct ipv6hdr))) return; @@ -628,8 +629,10 @@ static void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info) --ANK (980726) */ + hash = nexthdr & (MAX_INET_PROTOS - 1); + rcu_read_lock(); - ipprot = rcu_dereference(inet6_protos[nexthdr]); + ipprot = rcu_dereference(inet6_protos[hash]); if (ipprot && ipprot->err_handler) ipprot->err_handler(skb, NULL, type, code, inner_offset, info); rcu_read_unlock(); diff --git a/trunk/net/ipv6/ip6_input.c b/trunk/net/ipv6/ip6_input.c index 5ab923e51af3..21a15dfe4a9e 100644 --- a/trunk/net/ipv6/ip6_input.c +++ b/trunk/net/ipv6/ip6_input.c @@ -168,12 +168,13 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt static int ip6_input_finish(struct sk_buff *skb) { - struct net *net = dev_net(skb_dst(skb)->dev); const struct inet6_protocol *ipprot; - struct inet6_dev *idev; unsigned int nhoff; int nexthdr; bool raw; + u8 hash; + struct inet6_dev *idev; + struct net *net = dev_net(skb_dst(skb)->dev); /* * Parse extension headers @@ -188,7 +189,9 @@ static int ip6_input_finish(struct sk_buff *skb) nexthdr = skb_network_header(skb)[nhoff]; raw = raw6_local_deliver(skb, nexthdr); - if ((ipprot = rcu_dereference(inet6_protos[nexthdr])) != NULL) { + + hash = nexthdr & (MAX_INET_PROTOS - 1); + if ((ipprot = rcu_dereference(inet6_protos[hash])) != NULL) { int ret; if (ipprot->flags & INET6_PROTO_FINAL) { diff --git a/trunk/net/ipv6/protocol.c b/trunk/net/ipv6/protocol.c index 053082dfc93e..9a7978fdc02a 100644 --- a/trunk/net/ipv6/protocol.c +++ b/trunk/net/ipv6/protocol.c @@ -29,7 +29,9 @@ const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS] __read_mostly; int inet6_add_protocol(const struct inet6_protocol *prot, unsigned char protocol) { - return !cmpxchg((const struct inet6_protocol **)&inet6_protos[protocol], + int hash = protocol & (MAX_INET_PROTOS - 1); + + return !cmpxchg((const struct inet6_protocol **)&inet6_protos[hash], NULL, prot) ? 0 : -1; } EXPORT_SYMBOL(inet6_add_protocol); @@ -40,9 +42,9 @@ EXPORT_SYMBOL(inet6_add_protocol); int inet6_del_protocol(const struct inet6_protocol *prot, unsigned char protocol) { - int ret; + int ret, hash = protocol & (MAX_INET_PROTOS - 1); - ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[protocol], + ret = (cmpxchg((const struct inet6_protocol **)&inet6_protos[hash], prot, NULL) == prot) ? 
0 : -1; synchronize_net(); diff --git a/trunk/net/ipv6/raw.c b/trunk/net/ipv6/raw.c index b5c1dcb27737..43b0042f15f4 100644 --- a/trunk/net/ipv6/raw.c +++ b/trunk/net/ipv6/raw.c @@ -165,7 +165,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr) saddr = &ipv6_hdr(skb)->saddr; daddr = saddr + 1; - hash = nexthdr & (RAW_HTABLE_SIZE - 1); + hash = nexthdr & (MAX_INET_PROTOS - 1); read_lock(&raw_v6_hashinfo.lock); sk = sk_head(&raw_v6_hashinfo.ht[hash]); @@ -229,7 +229,7 @@ bool raw6_local_deliver(struct sk_buff *skb, int nexthdr) { struct sock *raw_sk; - raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (RAW_HTABLE_SIZE - 1)]); + raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (MAX_INET_PROTOS - 1)]); if (raw_sk && !ipv6_raw_deliver(skb, nexthdr)) raw_sk = NULL; diff --git a/trunk/net/mac80211/cfg.c b/trunk/net/mac80211/cfg.c index 85ac364f4636..498c94e34427 100644 --- a/trunk/net/mac80211/cfg.c +++ b/trunk/net/mac80211/cfg.c @@ -2097,9 +2097,6 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); int i, ret; - if (!ieee80211_sdata_running(sdata)) - return -ENETDOWN; - if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) { ret = drv_set_bitrate_mask(local, sdata, mask); if (ret) diff --git a/trunk/net/mac80211/mlme.c b/trunk/net/mac80211/mlme.c index 079038d26a14..d7134c170336 100644 --- a/trunk/net/mac80211/mlme.c +++ b/trunk/net/mac80211/mlme.c @@ -1337,8 +1337,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, if (WARN_ON(!ifmgd->associated)) return; - ieee80211_stop_poll(sdata); - memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN); ifmgd->associated = NULL; @@ -2594,6 +2592,8 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata, struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; u8 frame_buf[DEAUTH_DISASSOC_LEN]; + ieee80211_stop_poll(sdata); + ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, false, frame_buf); mutex_unlock(&ifmgd->mtx); diff --git a/trunk/net/mac80211/sta_info.h b/trunk/net/mac80211/sta_info.h index a470e1123a55..3bb24a121c95 100644 --- a/trunk/net/mac80211/sta_info.h +++ b/trunk/net/mac80211/sta_info.h @@ -271,9 +271,6 @@ struct sta_ampdu_mlme { * @plink_timer: peer link watch timer * @plink_timer_was_running: used by suspend/resume to restore timers * @t_offset: timing offset relative to this host - * @t_offset_setpoint: reference timing offset of this sta to be used when - * calculating clockdrift - * @ch_type: peer's channel type * @debugfs: debug filesystem info * @dead: set to true when sta is unlinked * @uploaded: set to true when sta is uploaded to the driver @@ -281,8 +278,6 @@ struct sta_ampdu_mlme { * @sta: station information we share with the driver * @sta_state: duplicates information about station state (for debug) * @beacon_loss_count: number of times beacon loss has triggered - * @supports_40mhz: tracks whether the station advertised 40 MHz support - * as we overwrite its HT parameters with the currently used value */ struct sta_info { /* General information, mostly static */ diff --git a/trunk/net/netfilter/core.c b/trunk/net/netfilter/core.c index 7eef8453b909..4cd10ed2d6e6 100644 --- a/trunk/net/netfilter/core.c +++ b/trunk/net/netfilter/core.c @@ -265,7 +265,7 @@ void nf_conntrack_destroy(struct nf_conntrack *nfct) } EXPORT_SYMBOL(nf_conntrack_destroy); -struct nfq_ct_hook *nfq_ct_hook; +struct nfq_ct_hook __rcu *nfq_ct_hook __read_mostly; EXPORT_SYMBOL_GPL(nfq_ct_hook); #endif /* CONFIG_NF_CONNTRACK */ 
diff --git a/trunk/net/wireless/reg.c b/trunk/net/wireless/reg.c
index baf5704740ee..15f347477a99 100644
--- a/trunk/net/wireless/reg.c
+++ b/trunk/net/wireless/reg.c
@@ -1389,7 +1389,7 @@ static void reg_set_request_processed(void)
 	spin_unlock(&reg_requests_lock);
 
 	if (last_request->initiator == NL80211_REGDOM_SET_BY_USER)
-		cancel_delayed_work(&reg_timeout);
+		cancel_delayed_work_sync(&reg_timeout);
 
 	if (need_more_processing)
 		schedule_work(&reg_work);
diff --git a/trunk/net/wireless/util.c b/trunk/net/wireless/util.c
index 316cfd00914f..8f2d68fc3a44 100644
--- a/trunk/net/wireless/util.c
+++ b/trunk/net/wireless/util.c
@@ -804,7 +804,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
 	    ntype == NL80211_IFTYPE_P2P_CLIENT))
 		return -EBUSY;
 
-	if (ntype != otype && netif_running(dev)) {
+	if (ntype != otype) {
 		err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
 						    ntype);
 		if (err)