diff --git a/[refs] b/[refs] index 22e589c52c0f..0659f36b9601 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: a77f4b4acf5b77f038bc11d3ca9b3af6f0124015 +refs/heads/master: 5b92da0443c2585e31b64e86c2e1b8e22845d4bb diff --git a/trunk/drivers/bluetooth/bluecard_cs.c b/trunk/drivers/bluetooth/bluecard_cs.c index 585c88e01893..1fcd92380356 100644 --- a/trunk/drivers/bluetooth/bluecard_cs.c +++ b/trunk/drivers/bluetooth/bluecard_cs.c @@ -231,12 +231,12 @@ static void bluecard_write_wakeup(bluecard_info_t *info) } do { - unsigned int iobase = info->p_dev->resource[0]->start; - unsigned int offset; - unsigned char command; - unsigned long ready_bit; + register unsigned int iobase = info->p_dev->resource[0]->start; + register unsigned int offset; + register unsigned char command; + register unsigned long ready_bit; register struct sk_buff *skb; - int len; + register int len; clear_bit(XMIT_WAKEUP, &(info->tx_state)); diff --git a/trunk/drivers/bluetooth/bpa10x.c b/trunk/drivers/bluetooth/bpa10x.c index 29caaed2d715..609861a53c28 100644 --- a/trunk/drivers/bluetooth/bpa10x.c +++ b/trunk/drivers/bluetooth/bpa10x.c @@ -470,7 +470,7 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id * hdev->flush = bpa10x_flush; hdev->send = bpa10x_send_frame; - set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); + set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); err = hci_register_dev(hdev); if (err < 0) { diff --git a/trunk/drivers/bluetooth/bt3c_cs.c b/trunk/drivers/bluetooth/bt3c_cs.c index b2b0fbbb43b5..308c8599ab55 100644 --- a/trunk/drivers/bluetooth/bt3c_cs.c +++ b/trunk/drivers/bluetooth/bt3c_cs.c @@ -186,9 +186,9 @@ static void bt3c_write_wakeup(bt3c_info_t *info) return; do { - unsigned int iobase = info->p_dev->resource[0]->start; + register unsigned int iobase = info->p_dev->resource[0]->start; register struct sk_buff *skb; - int len; + register int len; if (!pcmcia_dev_present(info->p_dev)) break; diff --git a/trunk/drivers/bluetooth/btmrvl_sdio.c b/trunk/drivers/bluetooth/btmrvl_sdio.c index 2867499f7256..a853244e7fd7 100644 --- a/trunk/drivers/bluetooth/btmrvl_sdio.c +++ b/trunk/drivers/bluetooth/btmrvl_sdio.c @@ -110,9 +110,6 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = { /* Marvell SD8787 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A), .driver_data = (unsigned long) &btmrvl_sdio_sd8787 }, - /* Marvell SD8787 Bluetooth AMP device */ - { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911B), - .driver_data = (unsigned long) &btmrvl_sdio_sd8787 }, /* Marvell SD8797 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A), .driver_data = (unsigned long) &btmrvl_sdio_sd8797 }, diff --git a/trunk/drivers/bluetooth/btuart_cs.c b/trunk/drivers/bluetooth/btuart_cs.c index 65b8d996840c..c4fc2f3fc32c 100644 --- a/trunk/drivers/bluetooth/btuart_cs.c +++ b/trunk/drivers/bluetooth/btuart_cs.c @@ -140,9 +140,9 @@ static void btuart_write_wakeup(btuart_info_t *info) } do { - unsigned int iobase = info->p_dev->resource[0]->start; + register unsigned int iobase = info->p_dev->resource[0]->start; register struct sk_buff *skb; - int len; + register int len; clear_bit(XMIT_WAKEUP, &(info->tx_state)); diff --git a/trunk/drivers/bluetooth/btusb.c b/trunk/drivers/bluetooth/btusb.c index a45e717f5f84..c9463af8e564 100644 --- a/trunk/drivers/bluetooth/btusb.c +++ b/trunk/drivers/bluetooth/btusb.c @@ -21,7 +21,15 @@ * */ +#include #include +#include +#include +#include +#include +#include +#include + #include #include @@ -1018,7 +1026,7 @@ static int 
btusb_probe(struct usb_interface *intf, data->isoc = usb_ifnum_to_if(data->udev, 1); if (!reset) - set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); + set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) { if (!disable_scofix) @@ -1030,7 +1038,7 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_DIGIANSWER) { data->cmdreq_type = USB_TYPE_VENDOR; - set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); + set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); } if (id->driver_info & BTUSB_CSR) { @@ -1038,7 +1046,7 @@ static int btusb_probe(struct usb_interface *intf, /* Old firmware would otherwise execute USB reset */ if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117) - set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); + set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); } if (id->driver_info & BTUSB_SNIFFER) { diff --git a/trunk/drivers/bluetooth/dtl1_cs.c b/trunk/drivers/bluetooth/dtl1_cs.c index b1b37ccd3cd4..6e8d96189684 100644 --- a/trunk/drivers/bluetooth/dtl1_cs.c +++ b/trunk/drivers/bluetooth/dtl1_cs.c @@ -144,9 +144,9 @@ static void dtl1_write_wakeup(dtl1_info_t *info) } do { - unsigned int iobase = info->p_dev->resource[0]->start; + register unsigned int iobase = info->p_dev->resource[0]->start; register struct sk_buff *skb; - int len; + register int len; clear_bit(XMIT_WAKEUP, &(info->tx_state)); diff --git a/trunk/drivers/bluetooth/hci_bcsp.c b/trunk/drivers/bluetooth/hci_bcsp.c index 57e502e06080..661a8dc4d2f8 100644 --- a/trunk/drivers/bluetooth/hci_bcsp.c +++ b/trunk/drivers/bluetooth/hci_bcsp.c @@ -552,7 +552,7 @@ static u16 bscp_get_crc(struct bcsp_struct *bcsp) static int bcsp_recv(struct hci_uart *hu, void *data, int count) { struct bcsp_struct *bcsp = hu->priv; - unsigned char *ptr; + register unsigned char *ptr; BT_DBG("hu %p count %d rx_state %d rx_count %ld", hu, count, bcsp->rx_state, bcsp->rx_count); diff --git a/trunk/drivers/bluetooth/hci_h4.c b/trunk/drivers/bluetooth/hci_h4.c index c60623f206d4..748329468d26 100644 --- a/trunk/drivers/bluetooth/hci_h4.c +++ b/trunk/drivers/bluetooth/hci_h4.c @@ -126,7 +126,7 @@ static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb) static inline int h4_check_data_len(struct h4_struct *h4, int len) { - int room = skb_tailroom(h4->rx_skb); + register int room = skb_tailroom(h4->rx_skb); BT_DBG("len %d room %d", len, room); diff --git a/trunk/drivers/bluetooth/hci_ldisc.c b/trunk/drivers/bluetooth/hci_ldisc.c index 2f9b796e106e..e564579a6115 100644 --- a/trunk/drivers/bluetooth/hci_ldisc.c +++ b/trunk/drivers/bluetooth/hci_ldisc.c @@ -394,7 +394,7 @@ static int hci_uart_register_dev(struct hci_uart *hu) set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags)) - set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); + set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks); if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags)) hdev->dev_type = HCI_AMP; diff --git a/trunk/drivers/bluetooth/hci_ll.c b/trunk/drivers/bluetooth/hci_ll.c index ff6d589c34a5..b874c0efde24 100644 --- a/trunk/drivers/bluetooth/hci_ll.c +++ b/trunk/drivers/bluetooth/hci_ll.c @@ -348,7 +348,7 @@ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb) static inline int ll_check_data_len(struct ll_struct *ll, int len) { - int room = skb_tailroom(ll->rx_skb); + register int room = skb_tailroom(ll->rx_skb); BT_DBG("len %d room %d", len, room); @@ -374,11 +374,11 @@ static inline int ll_check_data_len(struct ll_struct *ll, int len) static int ll_recv(struct hci_uart 
*hu, void *data, int count)
 {
 	struct ll_struct *ll = hu->priv;
-	char *ptr;
+	register char *ptr;
 	struct hci_event_hdr *eh;
 	struct hci_acl_hdr *ah;
 	struct hci_sco_hdr *sh;
-	int len, type, dlen;
+	register int len, type, dlen;
 
 	BT_DBG("hu %p count %d rx_state %ld rx_count %ld", hu, count,
 		ll->rx_state, ll->rx_count);
diff --git a/trunk/drivers/net/can/c_can/Kconfig b/trunk/drivers/net/can/c_can/Kconfig
index 25d371cf98dd..3b83bafcd947 100644
--- a/trunk/drivers/net/can/c_can/Kconfig
+++ b/trunk/drivers/net/can/c_can/Kconfig
@@ -13,4 +13,11 @@ config CAN_C_CAN_PLATFORM
 	  boards from ST Microelectronics (http://www.st.com) like the
 	  SPEAr1310 and SPEAr320 evaluation boards & TI (www.ti.com)
 	  boards like am335x, dm814x, dm813x and dm811x.
+
+config CAN_C_CAN_PCI
+	tristate "Generic PCI Bus based C_CAN/D_CAN driver"
+	depends on PCI
+	---help---
+	  This driver adds support for the C_CAN/D_CAN chips connected
+	  to the PCI bus.
 endif
diff --git a/trunk/drivers/net/can/c_can/Makefile b/trunk/drivers/net/can/c_can/Makefile
index 9273f6d5c4b7..ad1cc842170a 100644
--- a/trunk/drivers/net/can/c_can/Makefile
+++ b/trunk/drivers/net/can/c_can/Makefile
@@ -4,5 +4,6 @@
 
 obj-$(CONFIG_CAN_C_CAN) += c_can.o
 obj-$(CONFIG_CAN_C_CAN_PLATFORM) += c_can_platform.o
+obj-$(CONFIG_CAN_C_CAN_PCI) += c_can_pci.o
 
 ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/trunk/drivers/net/can/c_can/c_can_pci.c b/trunk/drivers/net/can/c_can/c_can_pci.c
new file mode 100644
index 000000000000..914aecfa09a9
--- /dev/null
+++ b/trunk/drivers/net/can/c_can/c_can_pci.c
@@ -0,0 +1,236 @@
+/*
+ * PCI bus driver for Bosch C_CAN/D_CAN controller
+ *
+ * Copyright (C) 2012 Federico Vaga
+ *
+ * Borrowed from c_can_platform.c
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "c_can.h"
+
+enum c_can_pci_reg_align {
+	C_CAN_REG_ALIGN_16,
+	C_CAN_REG_ALIGN_32,
+};
+
+struct c_can_pci_data {
+	/* Specify if it is C_CAN or D_CAN */
+	enum c_can_dev_id type;
+	/* Set the register alignment in the memory */
+	enum c_can_pci_reg_align reg_align;
+	/* Set the frequency if clk is not usable */
+	unsigned int freq;
+};
+
+/*
+ * 16-bit c_can registers can be arranged differently in the memory
+ * architecture of different implementations. For example: 16-bit
+ * registers can be aligned to a 16-bit boundary or 32-bit boundary etc.
+ * Handle the same by providing a common read/write interface.
+ */
+static u16 c_can_pci_read_reg_aligned_to_16bit(struct c_can_priv *priv,
+						enum reg index)
+{
+	return readw(priv->base + priv->regs[index]);
+}
+
+static void c_can_pci_write_reg_aligned_to_16bit(struct c_can_priv *priv,
+						enum reg index, u16 val)
+{
+	writew(val, priv->base + priv->regs[index]);
+}
+
+static u16 c_can_pci_read_reg_aligned_to_32bit(struct c_can_priv *priv,
+						enum reg index)
+{
+	return readw(priv->base + 2 * priv->regs[index]);
+}
+
+static void c_can_pci_write_reg_aligned_to_32bit(struct c_can_priv *priv,
+						enum reg index, u16 val)
+{
+	writew(val, priv->base + 2 * priv->regs[index]);
+}
+
+static int __devinit c_can_pci_probe(struct pci_dev *pdev,
+				     const struct pci_device_id *ent)
+{
+	struct c_can_pci_data *c_can_pci_data = (void *)ent->driver_data;
+	struct c_can_priv *priv;
+	struct net_device *dev;
+	void __iomem *addr;
+	struct clk *clk;
+	int ret;
+
+	ret = pci_enable_device(pdev);
+	if (ret) {
+		dev_err(&pdev->dev, "pci_enable_device FAILED\n");
+		goto out;
+	}
+
+	ret = pci_request_regions(pdev, KBUILD_MODNAME);
+	if (ret) {
+		dev_err(&pdev->dev, "pci_request_regions FAILED\n");
+		goto out_disable_device;
+	}
+
+	pci_set_master(pdev);
+	pci_enable_msi(pdev);
+
+	addr = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
+	if (!addr) {
+		dev_err(&pdev->dev,
+			"device has no PCI memory resources, "
+			"failing adapter\n");
+		ret = -ENOMEM;
+		goto out_release_regions;
+	}
+
+	/* allocate the c_can device */
+	dev = alloc_c_can_dev();
+	if (!dev) {
+		ret = -ENOMEM;
+		goto out_iounmap;
+	}
+
+	priv = netdev_priv(dev);
+	pci_set_drvdata(pdev, dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	dev->irq = pdev->irq;
+	priv->base = addr;
+
+	if (!c_can_pci_data->freq) {
+		/* get the appropriate clk */
+		clk = clk_get(&pdev->dev, NULL);
+		if (IS_ERR(clk)) {
+			dev_err(&pdev->dev, "no clock defined\n");
+			ret = -ENODEV;
+			goto out_free_c_can;
+		}
+		priv->can.clock.freq = clk_get_rate(clk);
+		priv->priv = clk;
+	} else {
+		priv->can.clock.freq = c_can_pci_data->freq;
+		priv->priv = NULL;
+	}
+
+	/* Configure CAN type */
+	switch (c_can_pci_data->type) {
+	case C_CAN_DEVTYPE:
+		priv->regs = reg_map_c_can;
+		break;
+	case D_CAN_DEVTYPE:
+		priv->regs = reg_map_d_can;
+		priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;
+		break;
+	default:
+		ret = -EINVAL;
+		goto out_free_clock;
+	}
+
+	/* Configure access to registers */
+	switch (c_can_pci_data->reg_align) {
+	case C_CAN_REG_ALIGN_32:
+		priv->read_reg = c_can_pci_read_reg_aligned_to_32bit;
+		priv->write_reg = c_can_pci_write_reg_aligned_to_32bit;
+		break;
+	case C_CAN_REG_ALIGN_16:
+		priv->read_reg = c_can_pci_read_reg_aligned_to_16bit;
+		priv->write_reg = c_can_pci_write_reg_aligned_to_16bit;
+		break;
+	default:
+		ret = -EINVAL;
+		goto out_free_clock;
+	}
+
+	ret = register_c_can_dev(dev);
+	if (ret) {
+		dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
+			KBUILD_MODNAME, ret);
+		goto out_free_clock;
+	}
+
+	dev_dbg(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
+		KBUILD_MODNAME, priv->regs, dev->irq);
+
+	return 0;
+
+out_free_clock:
+	if (priv->priv)
+		clk_put(priv->priv);
+out_free_c_can:
+	pci_set_drvdata(pdev, NULL);
+	free_c_can_dev(dev);
+out_iounmap:
+	pci_iounmap(pdev, addr);
+out_release_regions:
+	pci_disable_msi(pdev);
+	pci_clear_master(pdev);
+	pci_release_regions(pdev);
+out_disable_device:
+	pci_disable_device(pdev);
+out:
+	return ret;
+}
+
+static void __devexit c_can_pci_remove(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct c_can_priv *priv =
netdev_priv(dev); + + unregister_c_can_dev(dev); + + if (priv->priv) + clk_put(priv->priv); + + pci_set_drvdata(pdev, NULL); + free_c_can_dev(dev); + + pci_iounmap(pdev, priv->base); + pci_disable_msi(pdev); + pci_clear_master(pdev); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static struct c_can_pci_data c_can_sta2x11= { + .type = C_CAN_DEVTYPE, + .reg_align = C_CAN_REG_ALIGN_32, + .freq = 52000000, /* 52 Mhz */ +}; + +#define C_CAN_ID(_vend, _dev, _driverdata) { \ + PCI_DEVICE(_vend, _dev), \ + .driver_data = (unsigned long)&_driverdata, \ +} +static DEFINE_PCI_DEVICE_TABLE(c_can_pci_tbl) = { + C_CAN_ID(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_CAN, + c_can_sta2x11), + {}, +}; +static struct pci_driver c_can_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = c_can_pci_tbl, + .probe = c_can_pci_probe, + .remove = __devexit_p(c_can_pci_remove), +}; + +module_pci_driver(c_can_pci_driver); + +MODULE_AUTHOR("Federico Vaga "); +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("PCI CAN bus driver for Bosch C_CAN/D_CAN controller"); +MODULE_DEVICE_TABLE(pci, c_can_pci_tbl); diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 7211cb07426e..7de824184979 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -23,8 +23,8 @@ * (you will need to reboot afterwards) */ /* #define BNX2X_STOP_ON_ERROR */ -#define DRV_MODULE_VERSION "1.72.51-0" -#define DRV_MODULE_RELDATE "2012/06/18" +#define DRV_MODULE_VERSION "1.72.50-0" +#define DRV_MODULE_RELDATE "2012/04/23" #define BNX2X_BC_VER 0x040200 #if defined(CONFIG_DCB) @@ -248,12 +248,13 @@ enum { BNX2X_MAX_CNIC_ETH_CL_ID_IDX, }; -#define BNX2X_CNIC_START_ETH_CID(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) *\ - (bp)->max_cos) +#define BNX2X_CNIC_START_ETH_CID 48 +enum { /* iSCSI L2 */ -#define BNX2X_ISCSI_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp)) + BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID, /* FCoE L2 */ -#define BNX2X_FCOE_ETH_CID(bp) (BNX2X_CNIC_START_ETH_CID(bp) + 1) + BNX2X_FCOE_ETH_CID, +}; /** Additional rings budgeting */ #ifdef BCM_CNIC @@ -275,30 +276,29 @@ enum { #define FIRST_TX_ONLY_COS_INDEX 1 #define FIRST_TX_COS_INDEX 0 +/* defines for decodeing the fastpath index and the cos index out of the + * transmission queue index + */ +#define MAX_TXQS_PER_COS FP_SB_MAX_E1x + +#define TXQ_TO_FP(txq_index) ((txq_index) % MAX_TXQS_PER_COS) +#define TXQ_TO_COS(txq_index) ((txq_index) / MAX_TXQS_PER_COS) + /* rules for calculating the cids of tx-only connections */ -#define CID_TO_FP(cid, bp) ((cid) % BNX2X_NUM_NON_CNIC_QUEUES(bp)) -#define CID_COS_TO_TX_ONLY_CID(cid, cos, bp) \ - (cid + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp)) +#define CID_TO_FP(cid) ((cid) % MAX_TXQS_PER_COS) +#define CID_COS_TO_TX_ONLY_CID(cid, cos) (cid + cos * MAX_TXQS_PER_COS) /* fp index inside class of service range */ -#define FP_COS_TO_TXQ(fp, cos, bp) \ - ((fp)->index + cos * BNX2X_NUM_NON_CNIC_QUEUES(bp)) - -/* Indexes for transmission queues array: - * txdata for RSS i CoS j is at location i + (j * num of RSS) - * txdata for FCoE (if exist) is at location max cos * num of RSS - * txdata for FWD (if exist) is one location after FCoE - * txdata for OOO (if exist) is one location after FWD +#define FP_COS_TO_TXQ(fp, cos) ((fp)->index + cos * MAX_TXQS_PER_COS) + +/* + * 0..15 eth cos0 + * 16..31 eth cos1 if applicable + * 32..47 eth cos2 If applicable + * fcoe queue follows eth queues (16, 32, 48 depending on cos) */ -enum { - 
FCOE_TXQ_IDX_OFFSET, - FWD_TXQ_IDX_OFFSET, - OOO_TXQ_IDX_OFFSET, -}; -#define MAX_ETH_TXQ_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) * (bp)->max_cos) -#ifdef BCM_CNIC -#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp) + FCOE_TXQ_IDX_OFFSET) -#endif +#define MAX_ETH_TXQ_IDX(bp) (MAX_TXQS_PER_COS * (bp)->max_cos) +#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp)) /* fast path */ /* @@ -481,8 +481,6 @@ struct bnx2x_fp_txdata { __le16 *tx_cons_sb; int txq_index; - struct bnx2x_fastpath *parent_fp; - int tx_ring_size; }; enum bnx2x_tpa_mode_t { @@ -509,7 +507,7 @@ struct bnx2x_fastpath { enum bnx2x_tpa_mode_t mode; u8 max_cos; /* actual number of active tx coses */ - struct bnx2x_fp_txdata *txdata_ptr[BNX2X_MULTI_TX_COS]; + struct bnx2x_fp_txdata txdata[BNX2X_MULTI_TX_COS]; struct sw_rx_bd *rx_buf_ring; /* BDs mappings ring */ struct sw_rx_page *rx_page_ring; /* SGE pages mappings ring */ @@ -549,45 +547,51 @@ struct bnx2x_fastpath { rx_calls; /* TPA related */ - struct bnx2x_agg_info *tpa_info; + struct bnx2x_agg_info tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2]; u8 disable_tpa; #ifdef BNX2X_STOP_ON_ERROR u64 tpa_queue_used; #endif + + struct tstorm_per_queue_stats old_tclient; + struct ustorm_per_queue_stats old_uclient; + struct xstorm_per_queue_stats old_xclient; + struct bnx2x_eth_q_stats eth_q_stats; + struct bnx2x_eth_q_stats_old eth_q_stats_old; + /* The size is calculated using the following: sizeof name field from netdev structure + 4 ('-Xx-' string) + 4 (for the digits and to make it DWORD aligned) */ #define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8) char name[FP_NAME_SIZE]; + + /* MACs object */ + struct bnx2x_vlan_mac_obj mac_obj; + + /* Queue State object */ + struct bnx2x_queue_sp_obj q_obj; + }; -#define bnx2x_fp(bp, nr, var) ((bp)->fp[(nr)].var) -#define bnx2x_sp_obj(bp, fp) ((bp)->sp_objs[(fp)->index]) -#define bnx2x_fp_stats(bp, fp) (&((bp)->fp_stats[(fp)->index])) -#define bnx2x_fp_qstats(bp, fp) (&((bp)->fp_stats[(fp)->index].eth_q_stats)) +#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) /* Use 2500 as a mini-jumbo MTU for FCoE */ #define BNX2X_FCOE_MINI_JUMBO_MTU 2500 -#define FCOE_IDX_OFFSET 0 - -#define FCOE_IDX(bp) (BNX2X_NUM_NON_CNIC_QUEUES(bp) + \ - FCOE_IDX_OFFSET) -#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX(bp)]) -#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) -#define bnx2x_fcoe_inner_sp_obj(bp) (&bp->sp_objs[FCOE_IDX(bp)]) -#define bnx2x_fcoe_sp_obj(bp, var) (bnx2x_fcoe_inner_sp_obj(bp)->var) -#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \ - txdata_ptr[FIRST_TX_COS_INDEX] \ - ->var) +/* FCoE L2 `fastpath' entry is right after the eth entries */ +#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp) +#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX]) +#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var) +#define bnx2x_fcoe_tx(bp, var) (bnx2x_fcoe_fp(bp)-> \ + txdata[FIRST_TX_COS_INDEX].var) #define IS_ETH_FP(fp) (fp->index < \ BNX2X_NUM_ETH_QUEUES(fp->bp)) #ifdef BCM_CNIC -#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX(fp->bp)) -#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX(bp)) +#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX) +#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX) #else #define IS_FCOE_FP(fp) false #define IS_FCOE_IDX(idx) false @@ -974,8 +978,8 @@ union cdu_context { }; /* CDU host DB constants */ -#define CDU_ILT_PAGE_SZ_HW 2 -#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 32K */ +#define CDU_ILT_PAGE_SZ_HW 3 +#define CDU_ILT_PAGE_SZ (8192 << CDU_ILT_PAGE_SZ_HW) /* 64K */ #define ILT_PAGE_CIDS (CDU_ILT_PAGE_SZ / sizeof(union cdu_context)) #ifdef 
BCM_CNIC @@ -1178,31 +1182,11 @@ struct bnx2x_prev_path_list { struct list_head list; }; -struct bnx2x_sp_objs { - /* MACs object */ - struct bnx2x_vlan_mac_obj mac_obj; - - /* Queue State object */ - struct bnx2x_queue_sp_obj q_obj; -}; - -struct bnx2x_fp_stats { - struct tstorm_per_queue_stats old_tclient; - struct ustorm_per_queue_stats old_uclient; - struct xstorm_per_queue_stats old_xclient; - struct bnx2x_eth_q_stats eth_q_stats; - struct bnx2x_eth_q_stats_old eth_q_stats_old; -}; - struct bnx2x { /* Fields used in the tx and intr/napi performance paths * are grouped together in the beginning of the structure */ struct bnx2x_fastpath *fp; - struct bnx2x_sp_objs *sp_objs; - struct bnx2x_fp_stats *fp_stats; - struct bnx2x_fp_txdata *bnx2x_txq; - int bnx2x_txq_size; void __iomem *regview; void __iomem *doorbells; u16 db_size; @@ -1318,7 +1302,6 @@ struct bnx2x { #define NO_FCOE_FLAG (1 << 15) #define BC_SUPPORTS_PFC_STATS (1 << 17) #define USING_SINGLE_MSIX_FLAG (1 << 20) -#define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) #define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG) #define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG) @@ -1394,7 +1377,6 @@ struct bnx2x { #define BNX2X_MAX_COS 3 #define BNX2X_MAX_TX_COS 2 int num_queues; - int num_napi_queues; int disable_tpa; u32 rx_mode; @@ -1407,7 +1389,6 @@ struct bnx2x { u8 igu_dsb_id; u8 igu_base_sb; u8 igu_sb_cnt; - dma_addr_t def_status_blk_mapping; struct bnx2x_slowpath *slowpath; @@ -1439,11 +1420,7 @@ struct bnx2x { dma_addr_t fw_stats_data_mapping; int fw_stats_data_sz; - /* For max 196 cids (64*3 + non-eth), 32KB ILT page size and 1KB - * context size we need 8 ILT entries. - */ -#define ILT_MAX_L2_LINES 8 - struct hw_context context[ILT_MAX_L2_LINES]; + struct hw_context context; struct bnx2x_ilt *ilt; #define BP_ILT(bp) ((bp)->ilt) @@ -1456,14 +1433,13 @@ struct bnx2x { /* * Maximum CID count that might be required by the bnx2x: - * Max RSS * Max_Tx_Multi_Cos + FCoE + iSCSI + * Max Tss * Max_Tx_Multi_Cos + CNIC L2 Clients (FCoE and iSCSI related) */ -#define BNX2X_L2_CID_COUNT(bp) (BNX2X_NUM_ETH_QUEUES(bp) * BNX2X_MULTI_TX_COS \ - + NON_ETH_CONTEXT_USE + CNIC_PRESENT) -#define BNX2X_L2_MAX_CID(bp) (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS \ - + NON_ETH_CONTEXT_USE + CNIC_PRESENT) +#define BNX2X_L2_CID_COUNT(bp) (MAX_TXQS_PER_COS * BNX2X_MULTI_TX_COS +\ + NON_ETH_CONTEXT_USE + CNIC_PRESENT) #define L2_ILT_LINES(bp) (DIV_ROUND_UP(BNX2X_L2_CID_COUNT(bp),\ ILT_PAGE_CIDS)) +#define BNX2X_DB_SIZE(bp) (BNX2X_L2_CID_COUNT(bp) * (1 << BNX2X_DB_SHIFT)) int qm_cid_count; @@ -1622,8 +1598,6 @@ struct bnx2x { extern int num_queues; #define BNX2X_NUM_QUEUES(bp) (bp->num_queues) #define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE) -#define BNX2X_NUM_NON_CNIC_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - \ - NON_ETH_CONTEXT_USE) #define BNX2X_NUM_RX_QUEUES(bp) BNX2X_NUM_QUEUES(bp) #define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) @@ -1682,9 +1656,6 @@ struct bnx2x_func_init_params { continue; \ else -#define for_each_napi_rx_queue(bp, var) \ - for ((var) = 0; (var) < bp->num_napi_queues; (var)++) - /* Skip OOO FP */ #define for_each_tx_queue(bp, var) \ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ @@ -1846,7 +1817,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, #define LOAD_NORMAL 0 #define LOAD_OPEN 1 #define LOAD_DIAG 2 -#define LOAD_LOOPBACK_EXT 3 #define UNLOAD_NORMAL 0 #define UNLOAD_CLOSE 1 #define UNLOAD_RECOVERY 2 @@ -1929,17 +1899,13 @@ static inline u32 reg_poll(struct bnx2x 
*bp, u32 reg, u32 expected, int ms, #define PCICFG_LINK_SPEED 0xf0000 #define PCICFG_LINK_SPEED_SHIFT 16 -#define BNX2X_NUM_TESTS_SF 7 -#define BNX2X_NUM_TESTS_MF 3 -#define BNX2X_NUM_TESTS(bp) (IS_MF(bp) ? BNX2X_NUM_TESTS_MF : \ - BNX2X_NUM_TESTS_SF) + +#define BNX2X_NUM_TESTS 7 #define BNX2X_PHY_LOOPBACK 0 #define BNX2X_MAC_LOOPBACK 1 -#define BNX2X_EXT_LOOPBACK 2 #define BNX2X_PHY_LOOPBACK_FAILED 1 #define BNX2X_MAC_LOOPBACK_FAILED 2 -#define BNX2X_EXT_LOOPBACK_FAILED 3 #define BNX2X_LOOPBACK_FAILED (BNX2X_MAC_LOOPBACK_FAILED | \ BNX2X_PHY_LOOPBACK_FAILED) diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 00951b3aa62b..8098eea9704d 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -40,19 +40,12 @@ * Makes sure the contents of the bp->fp[to].napi is kept * intact. This is done by first copying the napi struct from * the target to the source, and then mem copying the entire - * source onto the target. Update txdata pointers and related - * content. + * source onto the target */ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) { struct bnx2x_fastpath *from_fp = &bp->fp[from]; struct bnx2x_fastpath *to_fp = &bp->fp[to]; - struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from]; - struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to]; - struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from]; - struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to]; - int old_max_eth_txqs, new_max_eth_txqs; - int old_txdata_index = 0, new_txdata_index = 0; /* Copy the NAPI object as it has been already initialized */ from_fp->napi = to_fp->napi; @@ -60,30 +53,6 @@ static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to) /* Move bnx2x_fastpath contents */ memcpy(to_fp, from_fp, sizeof(*to_fp)); to_fp->index = to; - - /* move sp_objs contents as well, as their indices match fp ones */ - memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs)); - - /* move fp_stats contents as well, as their indices match fp ones */ - memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats)); - - /* Update txdata pointers in fp and move txdata content accordingly: - * Each fp consumes 'max_cos' txdata structures, so the index should be - * decremented by max_cos x delta. 
- */ - - old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos; - new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) * - (bp)->max_cos; - if (from == FCOE_IDX(bp)) { - old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET; - new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET; - } - - memcpy(&bp->bnx2x_txq[old_txdata_index], - &bp->bnx2x_txq[new_txdata_index], - sizeof(struct bnx2x_fp_txdata)); - to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index]; } int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */ @@ -510,7 +479,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, where we are and drop the whole packet */ err = bnx2x_alloc_rx_sge(bp, fp, sge_idx); if (unlikely(err)) { - bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; + fp->eth_q_stats.rx_skb_alloc_failed++; return err; } @@ -615,7 +584,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, /* drop the packet and keep the buffer in the bin */ DP(NETIF_MSG_RX_STATUS, "Failed to allocate or map a new skb - dropping packet!\n"); - bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++; + fp->eth_q_stats.rx_skb_alloc_failed++; } static int bnx2x_alloc_rx_data(struct bnx2x *bp, @@ -648,10 +617,8 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp, return 0; } -static -void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe, - struct bnx2x_fastpath *fp, - struct bnx2x_eth_q_stats *qstats) +static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe, + struct bnx2x_fastpath *fp) { /* Do nothing if no IP/L4 csum validation was done */ @@ -665,7 +632,7 @@ void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe, if (cqe->fast_path_cqe.type_error_flags & (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG)) - qstats->hw_csum_err++; + fp->eth_q_stats.hw_csum_err++; else skb->ip_summed = CHECKSUM_UNNECESSARY; } @@ -809,7 +776,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, "ERROR flags %x rx packet %u\n", cqe_fp_flags, sw_comp_cons); - bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++; + fp->eth_q_stats.rx_err_discard_pkt++; goto reuse_rx; } @@ -822,7 +789,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) if (skb == NULL) { DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, "ERROR packet dropped because of alloc failure\n"); - bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; + fp->eth_q_stats.rx_skb_alloc_failed++; goto reuse_rx; } memcpy(skb->data, data + pad, len); @@ -836,15 +803,14 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) skb = build_skb(data, 0); if (unlikely(!skb)) { kfree(data); - bnx2x_fp_qstats(bp, fp)-> - rx_skb_alloc_failed++; + fp->eth_q_stats.rx_skb_alloc_failed++; goto next_rx; } skb_reserve(skb, pad); } else { DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS, "ERROR packet dropped because of alloc failure\n"); - bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++; + fp->eth_q_stats.rx_skb_alloc_failed++; reuse_rx: bnx2x_reuse_rx_data(fp, bd_cons, bd_prod); goto next_rx; @@ -860,8 +826,8 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) skb_checksum_none_assert(skb); if (bp->dev->features & NETIF_F_RXCSUM) - bnx2x_csum_validate(skb, cqe, fp, - bnx2x_fp_qstats(bp, fp)); + bnx2x_csum_validate(skb, cqe, fp); + skb_record_rx_queue(skb, fp->rx_queue); @@ -922,7 +888,7 @@ static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie) prefetch(fp->rx_cons_sb); for_each_cos_in_tx_queue(fp, cos) - 
prefetch(fp->txdata_ptr[cos]->tx_cons_sb); + prefetch(fp->txdata[cos].tx_cons_sb); prefetch(&fp->sb_running_index[SM_RX_ID]); napi_schedule(&bnx2x_fp(bp, fp->index, napi)); @@ -1239,7 +1205,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp) for_each_tx_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_cos_in_tx_queue(fp, cos) { - struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; + struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; unsigned pkts_compl = 0, bytes_compl = 0; u16 sw_prod = txdata->tx_pkt_prod; @@ -1251,8 +1217,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp) sw_cons++; } netdev_tx_reset_queue( - netdev_get_tx_queue(bp->dev, - txdata->txq_index)); + netdev_get_tx_queue(bp->dev, txdata->txq_index)); } } } @@ -1360,7 +1325,7 @@ void bnx2x_free_irq(struct bnx2x *bp) free_irq(bp->dev->irq, bp->dev); } -int bnx2x_enable_msix(struct bnx2x *bp) +int __devinit bnx2x_enable_msix(struct bnx2x *bp) { int msix_vec = 0, i, rc, req_cnt; @@ -1614,8 +1579,6 @@ void bnx2x_set_num_queues(struct bnx2x *bp) #endif /* Add special queues */ bp->num_queues += NON_ETH_CONTEXT_USE; - - BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); } /** @@ -1644,8 +1607,8 @@ static int bnx2x_set_real_num_queues(struct bnx2x *bp) { int rc, tx, rx; - tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos; - rx = BNX2X_NUM_QUEUES(bp) - NON_ETH_CONTEXT_USE; + tx = MAX_TXQS_PER_COS * bp->max_cos; + rx = BNX2X_NUM_ETH_QUEUES(bp); /* account for fcoe queue */ #ifdef BCM_CNIC @@ -1703,13 +1666,14 @@ static void bnx2x_set_rx_buf_size(struct bnx2x *bp) static int bnx2x_init_rss_pf(struct bnx2x *bp) { int i; + u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp); /* Prepare the initial contents fo the indirection table if RSS is * enabled */ - for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++) - bp->rss_conf_obj.ind_table[i] = + for (i = 0; i < sizeof(ind_table); i++) + ind_table[i] = bp->fp->cl_id + ethtool_rxfh_indir_default(i, num_eth_queues); @@ -1721,11 +1685,12 @@ static int bnx2x_init_rss_pf(struct bnx2x *bp) * For 57712 and newer on the other hand it's a per-function * configuration. 
*/ - return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp)); + return bnx2x_config_rss_eth(bp, ind_table, + bp->port.pmf || !CHIP_IS_E1x(bp)); } int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, - bool config_hash) + u8 *ind_table, bool config_hash) { struct bnx2x_config_rss_params params = {NULL}; int i; @@ -1748,15 +1713,11 @@ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, __set_bit(BNX2X_RSS_IPV4_TCP, ¶ms.rss_flags); __set_bit(BNX2X_RSS_IPV6, ¶ms.rss_flags); __set_bit(BNX2X_RSS_IPV6_TCP, ¶ms.rss_flags); - if (rss_obj->udp_rss_v4) - __set_bit(BNX2X_RSS_IPV4_UDP, ¶ms.rss_flags); - if (rss_obj->udp_rss_v6) - __set_bit(BNX2X_RSS_IPV6_UDP, ¶ms.rss_flags); /* Hash bits */ params.rss_result_mask = MULTI_MASK; - memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table)); + memcpy(params.ind_table, ind_table, sizeof(params.ind_table)); if (config_hash) { /* RSS keys */ @@ -1793,7 +1754,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp) int rc; unsigned long ramrod_flags = 0, vlan_mac_flags = 0; struct bnx2x_mcast_ramrod_params rparam = {NULL}; - struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; + struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; /***************** Cleanup MACs' object first *************************/ @@ -1804,7 +1765,7 @@ static void bnx2x_squeeze_objects(struct bnx2x *bp) /* Clean ETH primary MAC */ __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags); - rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags, + rc = mac_obj->delete_all(bp, &bp->fp->mac_obj, &vlan_mac_flags, &ramrod_flags); if (rc != 0) BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc); @@ -1890,16 +1851,11 @@ bool bnx2x_test_firmware_version(struct bnx2x *bp, bool is_err) static void bnx2x_bz_fp(struct bnx2x *bp, int index) { struct bnx2x_fastpath *fp = &bp->fp[index]; - struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[index]; - - int cos; struct napi_struct orig_napi = fp->napi; - struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info; /* bzero bnx2x_fastpath contents */ - if (bp->stats_init) { - memset(fp->tpa_info, 0, sizeof(*fp->tpa_info)); + if (bp->stats_init) memset(fp, 0, sizeof(*fp)); - } else { + else { /* Keep Queue statistics */ struct bnx2x_eth_q_stats *tmp_eth_q_stats; struct bnx2x_eth_q_stats_old *tmp_eth_q_stats_old; @@ -1907,27 +1863,26 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index) tmp_eth_q_stats = kzalloc(sizeof(struct bnx2x_eth_q_stats), GFP_KERNEL); if (tmp_eth_q_stats) - memcpy(tmp_eth_q_stats, &fp_stats->eth_q_stats, + memcpy(tmp_eth_q_stats, &fp->eth_q_stats, sizeof(struct bnx2x_eth_q_stats)); tmp_eth_q_stats_old = kzalloc(sizeof(struct bnx2x_eth_q_stats_old), GFP_KERNEL); if (tmp_eth_q_stats_old) - memcpy(tmp_eth_q_stats_old, &fp_stats->eth_q_stats_old, + memcpy(tmp_eth_q_stats_old, &fp->eth_q_stats_old, sizeof(struct bnx2x_eth_q_stats_old)); - memset(fp->tpa_info, 0, sizeof(*fp->tpa_info)); memset(fp, 0, sizeof(*fp)); if (tmp_eth_q_stats) { - memcpy(&fp_stats->eth_q_stats, tmp_eth_q_stats, - sizeof(struct bnx2x_eth_q_stats)); + memcpy(&fp->eth_q_stats, tmp_eth_q_stats, + sizeof(struct bnx2x_eth_q_stats)); kfree(tmp_eth_q_stats); } if (tmp_eth_q_stats_old) { - memcpy(&fp_stats->eth_q_stats_old, tmp_eth_q_stats_old, + memcpy(&fp->eth_q_stats_old, tmp_eth_q_stats_old, sizeof(struct bnx2x_eth_q_stats_old)); kfree(tmp_eth_q_stats_old); } @@ -1936,7 +1891,7 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index) /* Restore the NAPI object as it has been already initialized */ 
fp->napi = orig_napi; - fp->tpa_info = orig_tpa_info; + fp->bp = bp; fp->index = index; if (IS_ETH_FP(fp)) @@ -1945,16 +1900,6 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index) /* Special queues support only one CoS */ fp->max_cos = 1; - /* Init txdata pointers */ -#ifdef BCM_CNIC - if (IS_FCOE_FP(fp)) - fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)]; -#endif - if (IS_ETH_FP(fp)) - for_each_cos_in_tx_queue(fp, cos) - fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos * - BNX2X_NUM_ETH_QUEUES(bp) + index]; - /* * set the tpa flag for each queue. The tpa flag determines the queue * minimal size so it must be set prior to queue memory allocation @@ -2004,13 +1949,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* * Zero fastpath structures preserving invariants like napi, which are * allocated only once, fp index, max_cos, bp pointer. - * Also set fp->disable_tpa and txdata_ptr. + * Also set fp->disable_tpa. */ DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues); for_each_queue(bp, i) bnx2x_bz_fp(bp, i); - memset(bp->bnx2x_txq, 0, bp->bnx2x_txq_size * - sizeof(struct bnx2x_fp_txdata)); /* Set the receive queues buffer size */ @@ -2233,7 +2176,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) break; case LOAD_DIAG: - case LOAD_LOOPBACK_EXT: bp->state = BNX2X_STATE_DIAG; break; @@ -2253,7 +2195,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) /* re-read iscsi info */ bnx2x_get_iscsi_info(bp); bnx2x_setup_cnic_irq_info(bp); - bnx2x_setup_cnic_info(bp); if (bp->state == BNX2X_STATE_OPEN) bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); #endif @@ -2274,10 +2215,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) return -EBUSY; } - /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */ - if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG)) - bnx2x_dcbx_init(bp, false); - + bnx2x_dcbx_init(bp); return 0; #ifndef BNX2X_STOP_ON_ERROR @@ -2360,7 +2298,6 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) /* Stop Tx */ bnx2x_tx_disable(bp); - netdev_reset_tc(bp->dev); #ifdef BCM_CNIC bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD); @@ -2519,8 +2456,8 @@ int bnx2x_poll(struct napi_struct *napi, int budget) #endif for_each_cos_in_tx_queue(fp, cos) - if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) - bnx2x_tx_int(bp, fp->txdata_ptr[cos]); + if (bnx2x_tx_queue_has_work(&fp->txdata[cos])) + bnx2x_tx_int(bp, &fp->txdata[cos]); if (bnx2x_has_rx_work(fp)) { @@ -2897,6 +2834,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); + struct bnx2x_fastpath *fp; struct netdev_queue *txq; struct bnx2x_fp_txdata *txdata; struct sw_tx_bd *tx_buf; @@ -2906,7 +2844,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) struct eth_tx_parse_bd_e2 *pbd_e2 = NULL; u32 pbd_e2_parsing_data = 0; u16 pkt_prod, bd_prod; - int nbd, txq_index; + int nbd, txq_index, fp_index, txdata_index; dma_addr_t mapping; u32 xmit_type = bnx2x_xmit_type(bp, skb); int i; @@ -2925,12 +2863,31 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + FCOE_PRESENT); - txdata = &bp->bnx2x_txq[txq_index]; + /* decode the fastpath index and the cos index from the txq */ + fp_index = TXQ_TO_FP(txq_index); + txdata_index = TXQ_TO_COS(txq_index); + +#ifdef BCM_CNIC + /* + * Override the above for the FCoE queue: + * - FCoE fp entry is right after the ETH entries. + * - FCoE L2 queue uses bp->txdata[0] only. 
+ */ + if (unlikely(!NO_FCOE(bp) && (txq_index == + bnx2x_fcoe_tx(bp, txq_index)))) { + fp_index = FCOE_IDX; + txdata_index = 0; + } +#endif /* enable this debug print to view the transmission queue being used DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n", txq_index, fp_index, txdata_index); */ + /* locate the fastpath and the txdata */ + fp = &bp->fp[fp_index]; + txdata = &fp->txdata[txdata_index]; + /* enable this debug print to view the tranmission details DP(NETIF_MSG_TX_QUEUED, "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n", @@ -2938,7 +2895,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) if (unlikely(bnx2x_tx_avail(bp, txdata) < (skb_shinfo(skb)->nr_frags + 3))) { - bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; + fp->eth_q_stats.driver_xoff++; netif_tx_stop_queue(txq); BNX2X_ERR("BUG! Tx ring full when queue awake!\n"); return NETDEV_TX_BUSY; @@ -3220,7 +3177,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) * fp->bd_tx_cons */ smp_mb(); - bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++; + fp->eth_q_stats.driver_xoff++; if (bnx2x_tx_avail(bp, txdata) >= MAX_SKB_FRAGS + 4) netif_tx_wake_queue(txq); } @@ -3286,7 +3243,7 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc) /* configure traffic class to transmission queue mapping */ for (cos = 0; cos < bp->max_cos; cos++) { count = BNX2X_NUM_ETH_QUEUES(bp); - offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp); + offset = cos * MAX_TXQS_PER_COS; netdev_set_tc_queue(dev, cos, count, offset); DP(BNX2X_MSG_SP | NETIF_MSG_IFUP, "mapping tc %d to offset %d count %d\n", @@ -3385,7 +3342,7 @@ static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index) if (!skip_tx_queue(bp, fp_index)) { /* fastpath tx rings: tx_buf tx_desc */ for_each_cos_in_tx_queue(fp, cos) { - struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; + struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; DP(NETIF_MSG_IFDOWN, "freeing tx memory of fp %d cos %d cid %d\n", @@ -3457,7 +3414,7 @@ static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp, cqe_ring_prod); fp->rx_pkt = fp->rx_calls = 0; - bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt; + fp->eth_q_stats.rx_skb_alloc_failed += failure_cnt; return i - failure_cnt; } @@ -3542,7 +3499,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index) if (!skip_tx_queue(bp, index)) { /* fastpath tx rings: tx_buf tx_desc */ for_each_cos_in_tx_queue(fp, cos) { - struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; + struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; DP(NETIF_MSG_IFUP, "allocating tx memory of fp %d cos %d\n", @@ -3625,7 +3582,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp) #ifdef BCM_CNIC if (!NO_FCOE(bp)) /* FCoE */ - if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp))) + if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX)) /* we will fail load process instead of mark * NO_FCOE_FLAG */ @@ -3650,7 +3607,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp) */ /* move FCoE fp even NO_FCOE_FLAG is on */ - bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta); + bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta); #endif bp->num_queues -= delta; BNX2X_ERR("Adjusted num of queues from %d to %d\n", @@ -3662,11 +3619,7 @@ int bnx2x_alloc_fp_mem(struct bnx2x *bp) void bnx2x_free_mem_bp(struct bnx2x *bp) { - kfree(bp->fp->tpa_info); kfree(bp->fp); - kfree(bp->sp_objs); - kfree(bp->fp_stats); - kfree(bp->bnx2x_txq); kfree(bp->msix_table); kfree(bp->ilt); } @@ -3677,8 +3630,6 @@ int __devinit 
bnx2x_alloc_mem_bp(struct bnx2x *bp) struct msix_entry *tbl; struct bnx2x_ilt *ilt; int msix_table_size = 0; - int fp_array_size; - int i; /* * The biggest MSI-X table we might need is as a maximum number of fast @@ -3687,44 +3638,12 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp) msix_table_size = bp->igu_sb_cnt + 1; /* fp array: RSS plus CNIC related L2 queues */ - fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE; - BNX2X_DEV_INFO("fp_array_size %d", fp_array_size); - - fp = kcalloc(fp_array_size, sizeof(*fp), GFP_KERNEL); + fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE, + sizeof(*fp), GFP_KERNEL); if (!fp) goto alloc_err; - for (i = 0; i < fp_array_size; i++) { - fp[i].tpa_info = - kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2, - sizeof(struct bnx2x_agg_info), GFP_KERNEL); - if (!(fp[i].tpa_info)) - goto alloc_err; - } - bp->fp = fp; - /* allocate sp objs */ - bp->sp_objs = kcalloc(fp_array_size, sizeof(struct bnx2x_sp_objs), - GFP_KERNEL); - if (!bp->sp_objs) - goto alloc_err; - - /* allocate fp_stats */ - bp->fp_stats = kcalloc(fp_array_size, sizeof(struct bnx2x_fp_stats), - GFP_KERNEL); - if (!bp->fp_stats) - goto alloc_err; - - /* Allocate memory for the transmission queues array */ - bp->bnx2x_txq_size = BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS; -#ifdef BCM_CNIC - bp->bnx2x_txq_size++; -#endif - bp->bnx2x_txq = kcalloc(bp->bnx2x_txq_size, - sizeof(struct bnx2x_fp_txdata), GFP_KERNEL); - if (!bp->bnx2x_txq) - goto alloc_err; - /* msix table */ tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL); if (!tbl) diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index daa894bd772a..7cd99b75347a 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -29,7 +29,6 @@ extern int load_count[2][3]; /* per-path: 0-common, 1-port0, 2-port1 */ extern int num_queues; -extern int int_mode; /************************ Macros ********************************/ #define BNX2X_PCI_FREE(x, y, size) \ @@ -95,7 +94,7 @@ void bnx2x_send_unload_done(struct bnx2x *bp); * @config_hash: re-configure RSS hash keys configuration */ int bnx2x_config_rss_pf(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj, - bool config_hash); + u8 *ind_table, bool config_hash); /** * bnx2x__init_func_obj - init function object @@ -245,14 +244,6 @@ int bnx2x_cnic_notify(struct bnx2x *bp, int cmd); * @bp: driver handle */ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp); - -/** - * bnx2x_setup_cnic_info - provides cnic with updated info - * - * @bp: driver handle - */ -void bnx2x_setup_cnic_info(struct bnx2x *bp); - #endif /** @@ -418,7 +409,7 @@ void bnx2x_ilt_set_info(struct bnx2x *bp); * * @bp: driver handle */ -void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem); +void bnx2x_dcbx_init(struct bnx2x *bp); /** * bnx2x_set_power_state - set power state to the requested value. @@ -496,7 +487,7 @@ void bnx2x_netif_start(struct bnx2x *bp); * fills msix_table, requests vectors, updates num_queues * according to number of available vectors. 
*/ -int bnx2x_enable_msix(struct bnx2x *bp); +int __devinit bnx2x_enable_msix(struct bnx2x *bp); /** * bnx2x_enable_msi - request msi mode from OS, updated internals accordingly @@ -737,7 +728,7 @@ static inline bool bnx2x_has_tx_work(struct bnx2x_fastpath *fp) { u8 cos; for_each_cos_in_tx_queue(fp, cos) - if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos])) + if (bnx2x_tx_queue_has_work(&fp->txdata[cos])) return true; return false; } @@ -789,10 +780,8 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp) { int i; - bp->num_napi_queues = bp->num_queues; - /* Add NAPI objects */ - for_each_napi_rx_queue(bp, i) + for_each_rx_queue(bp, i) netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll, BNX2X_NAPI_WEIGHT); } @@ -801,12 +790,10 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp) { int i; - for_each_napi_rx_queue(bp, i) + for_each_rx_queue(bp, i) netif_napi_del(&bnx2x_fp(bp, i, napi)); } -void bnx2x_set_int_mode(struct bnx2x *bp); - static inline void bnx2x_disable_msi(struct bnx2x *bp) { if (bp->flags & USING_MSIX_FLAG) { @@ -878,9 +865,11 @@ static inline int func_by_vn(struct bnx2x *bp, int vn) return 2 * vn + BP_PORT(bp); } -static inline int bnx2x_config_rss_eth(struct bnx2x *bp, bool config_hash) +static inline int bnx2x_config_rss_eth(struct bnx2x *bp, u8 *ind_table, + bool config_hash) { - return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, config_hash); + return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, ind_table, + config_hash); } /** @@ -986,8 +975,8 @@ static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp, struct bnx2x *bp = fp->bp; /* Configure classification DBs */ - bnx2x_init_mac_obj(bp, &bnx2x_sp_obj(bp, fp).mac_obj, fp->cl_id, - fp->cid, BP_FUNC(bp), bnx2x_sp(bp, mac_rdata), + bnx2x_init_mac_obj(bp, &fp->mac_obj, fp->cl_id, fp->cid, + BP_FUNC(bp), bnx2x_sp(bp, mac_rdata), bnx2x_sp_mapping(bp, mac_rdata), BNX2X_FILTER_MAC_PENDING, &bp->sp_state, obj_type, @@ -1079,14 +1068,12 @@ static inline u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp) } static inline void bnx2x_init_txdata(struct bnx2x *bp, - struct bnx2x_fp_txdata *txdata, u32 cid, - int txq_index, __le16 *tx_cons_sb, - struct bnx2x_fastpath *fp) + struct bnx2x_fp_txdata *txdata, u32 cid, int txq_index, + __le16 *tx_cons_sb) { txdata->cid = cid; txdata->txq_index = txq_index; txdata->tx_cons_sb = tx_cons_sb; - txdata->parent_fp = fp; DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n", txdata->cid, txdata->txq_index); @@ -1120,13 +1107,18 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp); bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp, BNX2X_FCOE_ETH_CL_ID_IDX); - bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID(bp); + /** Current BNX2X_FCOE_ETH_CID deffinition implies not more than + * 16 ETH clients per function when CNIC is enabled! + * + * Fix it ASAP!!! 
+ */ + bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID; bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID; bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id; bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX; - bnx2x_init_txdata(bp, bnx2x_fcoe(bp, txdata_ptr[0]), - fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX, - fp); + + bnx2x_init_txdata(bp, &bnx2x_fcoe(bp, txdata[0]), + fp->cid, FCOE_TXQ_IDX(bp), BNX2X_FCOE_L2_TX_INDEX); DP(NETIF_MSG_IFUP, "created fcoe tx data (fp index %d)\n", fp->index); @@ -1143,8 +1135,8 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp) /* No multi-CoS for FCoE L2 client */ BUG_ON(fp->max_cos != 1); - bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, - &fp->cid, 1, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), + bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, &fp->cid, 1, + BP_FUNC(bp), bnx2x_sp(bp, q_rdata), bnx2x_sp_mapping(bp, q_rdata), q_type); DP(NETIF_MSG_IFUP, diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 8a73374e52a7..4f9244bd7530 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c @@ -972,26 +972,23 @@ void bnx2x_dcbx_init_params(struct bnx2x *bp) bp->dcbx_config_params.admin_default_priority = 0; } -void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem) +void bnx2x_dcbx_init(struct bnx2x *bp) { u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE; - /* only PMF can send ADMIN msg to MFW in old MFW versions */ - if ((!bp->port.pmf) && (!(bp->flags & BC_SUPPORTS_DCBX_MSG_NON_PMF))) - return; - if (bp->dcbx_enabled <= 0) return; /* validate: * chip of good for dcbx version, * dcb is wanted + * the function is pmf * shmem2 contains DCBX support fields */ DP(BNX2X_MSG_DCB, "dcb_state %d bp->port.pmf %d\n", bp->dcb_state, bp->port.pmf); - if (bp->dcb_state == BNX2X_DCB_STATE_ON && + if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf && SHMEM2_HAS(bp, dcbx_lldp_params_offset)) { dcbx_lldp_params_offset = SHMEM2_RD(bp, dcbx_lldp_params_offset); @@ -1002,23 +999,12 @@ void bnx2x_dcbx_init(struct bnx2x *bp, bool update_shmem) bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0); if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) { - /* need HW lock to avoid scenario of two drivers - * writing in parallel to shmem - */ - bnx2x_acquire_hw_lock(bp, - HW_LOCK_RESOURCE_DCBX_ADMIN_MIB); - if (update_shmem) - bnx2x_dcbx_admin_mib_updated_params(bp, - dcbx_lldp_params_offset); + bnx2x_dcbx_admin_mib_updated_params(bp, + dcbx_lldp_params_offset); /* Let HW start negotiation */ bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0); - /* release HW lock only after MFW acks that it finished - * reading values from shmem - */ - bnx2x_release_hw_lock(bp, - HW_LOCK_RESOURCE_DCBX_ADMIN_MIB); } } } @@ -2077,8 +2063,10 @@ static u8 bnx2x_dcbnl_set_all(struct net_device *netdev) "Handling parity error recovery. 
Try again later\n"); return 1; } - if (netif_running(bp->dev)) - bnx2x_dcbx_init(bp, true); + if (netif_running(bp->dev)) { + bnx2x_nic_unload(bp, UNLOAD_NORMAL); + rc = bnx2x_nic_load(bp, LOAD_NORMAL); + } DP(BNX2X_MSG_DCB, "set_dcbx_params done (%d)\n", rc); if (rc) return 1; diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 70c0881ce5a0..bf30e2829285 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -826,7 +826,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev, ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver); strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); info->n_stats = BNX2X_NUM_STATS; - info->testinfo_len = BNX2X_NUM_TESTS(bp); + info->testinfo_len = BNX2X_NUM_TESTS; info->eedump_len = bp->common.flash_size; info->regdump_len = bnx2x_get_regs_len(dev); } @@ -1533,14 +1533,16 @@ static int bnx2x_set_pauseparam(struct net_device *dev, return 0; } -char *bnx2x_tests_str_arr[BNX2X_NUM_TESTS_SF] = { - "register_test (offline) ", - "memory_test (offline) ", - "int_loopback_test (offline)", - "ext_loopback_test (offline)", - "nvram_test (online) ", - "interrupt_test (online) ", - "link_test (online) " +static const struct { + char string[ETH_GSTRING_LEN]; +} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = { + { "register_test (offline)" }, + { "memory_test (offline)" }, + { "loopback_test (offline)" }, + { "nvram_test (online)" }, + { "interrupt_test (online)" }, + { "link_test (online)" }, + { "idle check (online)" } }; static u32 bnx2x_eee_to_adv(u32 eee_adv) @@ -1941,14 +1943,6 @@ static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up, u8 is_serdes) if (cnt <= 0 && bnx2x_link_test(bp, is_serdes)) DP(BNX2X_MSG_ETHTOOL, "Timeout waiting for link up\n"); - - cnt = 1400; - while (!bp->link_vars.link_up && cnt--) - msleep(20); - - if (cnt <= 0 && !bp->link_vars.link_up) - DP(BNX2X_MSG_ETHTOOL, - "Timeout waiting for link init\n"); } } @@ -1959,7 +1953,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) unsigned char *packet; struct bnx2x_fastpath *fp_rx = &bp->fp[0]; struct bnx2x_fastpath *fp_tx = &bp->fp[0]; - struct bnx2x_fp_txdata *txdata = fp_tx->txdata_ptr[0]; + struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0]; u16 tx_start_idx, tx_idx; u16 rx_start_idx, rx_idx; u16 pkt_prod, bd_prod; @@ -1974,16 +1968,13 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) u16 len; int rc = -ENODEV; u8 *data; - struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, - txdata->txq_index); + struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txdata->txq_index); /* check the loopback mode */ switch (loopback_mode) { case BNX2X_PHY_LOOPBACK: - if (bp->link_params.loopback_mode != LOOPBACK_XGXS) { - DP(BNX2X_MSG_ETHTOOL, "PHY loopback not supported\n"); + if (bp->link_params.loopback_mode != LOOPBACK_XGXS) return -EINVAL; - } break; case BNX2X_MAC_LOOPBACK: if (CHIP_IS_E3(bp)) { @@ -2000,13 +1991,6 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode) bnx2x_phy_init(&bp->link_params, &bp->link_vars); break; - case BNX2X_EXT_LOOPBACK: - if (bp->link_params.loopback_mode != LOOPBACK_EXT) { - DP(BNX2X_MSG_ETHTOOL, - "Can't configure external loopback\n"); - return -EINVAL; - } - break; default: DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); return -EINVAL; @@ -2178,38 +2162,6 @@ static int bnx2x_test_loopback(struct bnx2x *bp) return 
rc; } -static int bnx2x_test_ext_loopback(struct bnx2x *bp) -{ - int rc; - u8 is_serdes = - (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0; - - if (BP_NOMCP(bp)) - return -ENODEV; - - if (!netif_running(bp->dev)) - return BNX2X_EXT_LOOPBACK_FAILED; - - bnx2x_nic_unload(bp, UNLOAD_NORMAL); - rc = bnx2x_nic_load(bp, LOAD_LOOPBACK_EXT); - if (rc) { - DP(BNX2X_MSG_ETHTOOL, - "Can't perform self-test, nic_load (for external lb) failed\n"); - return -ENODEV; - } - bnx2x_wait_for_link(bp, 1, is_serdes); - - bnx2x_netif_stop(bp, 1); - - rc = bnx2x_run_loopback(bp, BNX2X_EXT_LOOPBACK); - if (rc) - DP(BNX2X_MSG_ETHTOOL, "EXT loopback failed (res %d)\n", rc); - - bnx2x_netif_start(bp); - - return rc; -} - #define CRC32_RESIDUAL 0xdebb20e3 static int bnx2x_test_nvram(struct bnx2x *bp) @@ -2292,7 +2244,7 @@ static int bnx2x_test_intr(struct bnx2x *bp) return -ENODEV; } - params.q_obj = &bp->sp_objs->q_obj; + params.q_obj = &bp->fp->q_obj; params.cmd = BNX2X_Q_CMD_EMPTY; __set_bit(RAMROD_COMP_WAIT, ¶ms.ramrod_flags); @@ -2305,31 +2257,24 @@ static void bnx2x_self_test(struct net_device *dev, { struct bnx2x *bp = netdev_priv(dev); u8 is_serdes; - int rc; - if (bp->recovery_state != BNX2X_RECOVERY_DONE) { netdev_err(bp->dev, "Handling parity error recovery. Try again later\n"); etest->flags |= ETH_TEST_FL_FAILED; return; } - DP(BNX2X_MSG_ETHTOOL, - "Self-test command parameters: offline = %d, external_lb = %d\n", - (etest->flags & ETH_TEST_FL_OFFLINE), - (etest->flags & ETH_TEST_FL_EXTERNAL_LB)>>2); - memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS(bp)); + memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS); - if (!netif_running(dev)) { - DP(BNX2X_MSG_ETHTOOL, - "Can't perform self-test when interface is down\n"); + if (!netif_running(dev)) return; - } + /* offline tests are not supported in MF mode */ + if (IS_MF(bp)) + etest->flags &= ~ETH_TEST_FL_OFFLINE; is_serdes = (bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) > 0; - /* offline tests are not supported in MF mode */ - if ((etest->flags & ETH_TEST_FL_OFFLINE) && !IS_MF(bp)) { + if (etest->flags & ETH_TEST_FL_OFFLINE) { int port = BP_PORT(bp); u32 val; u8 link_up; @@ -2342,14 +2287,7 @@ static void bnx2x_self_test(struct net_device *dev, link_up = bp->link_vars.link_up; bnx2x_nic_unload(bp, UNLOAD_NORMAL); - rc = bnx2x_nic_load(bp, LOAD_DIAG); - if (rc) { - etest->flags |= ETH_TEST_FL_FAILED; - DP(BNX2X_MSG_ETHTOOL, - "Can't perform self-test, nic_load (for offline) failed\n"); - return; - } - + bnx2x_nic_load(bp, LOAD_DIAG); /* wait until link state is restored */ bnx2x_wait_for_link(bp, 1, is_serdes); @@ -2362,51 +2300,30 @@ static void bnx2x_self_test(struct net_device *dev, etest->flags |= ETH_TEST_FL_FAILED; } - buf[2] = bnx2x_test_loopback(bp); /* internal LB */ + buf[2] = bnx2x_test_loopback(bp); if (buf[2] != 0) etest->flags |= ETH_TEST_FL_FAILED; - if (etest->flags & ETH_TEST_FL_EXTERNAL_LB) { - buf[3] = bnx2x_test_ext_loopback(bp); /* external LB */ - if (buf[3] != 0) - etest->flags |= ETH_TEST_FL_FAILED; - etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE; - } - bnx2x_nic_unload(bp, UNLOAD_NORMAL); /* restore input for TX port IF */ REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val); - rc = bnx2x_nic_load(bp, LOAD_NORMAL); - if (rc) { - etest->flags |= ETH_TEST_FL_FAILED; - DP(BNX2X_MSG_ETHTOOL, - "Can't perform self-test, nic_load (for online) failed\n"); - return; - } + + bnx2x_nic_load(bp, LOAD_NORMAL); /* wait until link state is restored */ bnx2x_wait_for_link(bp, link_up, is_serdes); } if (bnx2x_test_nvram(bp) != 0) { - if 
(!IS_MF(bp)) - buf[4] = 1; - else - buf[0] = 1; + buf[3] = 1; etest->flags |= ETH_TEST_FL_FAILED; } if (bnx2x_test_intr(bp) != 0) { - if (!IS_MF(bp)) - buf[5] = 1; - else - buf[1] = 1; + buf[4] = 1; etest->flags |= ETH_TEST_FL_FAILED; } if (bnx2x_link_test(bp, is_serdes) != 0) { - if (!IS_MF(bp)) - buf[6] = 1; - else - buf[2] = 1; + buf[5] = 1; etest->flags |= ETH_TEST_FL_FAILED; } @@ -2451,7 +2368,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset) return num_stats; case ETH_SS_TEST: - return BNX2X_NUM_TESTS(bp); + return BNX2X_NUM_TESTS; default: return -EINVAL; @@ -2461,7 +2378,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset) static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) { struct bnx2x *bp = netdev_priv(dev); - int i, j, k, offset, start; + int i, j, k; char queue_name[MAX_QUEUE_NAME_LEN+1]; switch (stringset) { @@ -2492,17 +2409,7 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) break; case ETH_SS_TEST: - /* First 4 tests cannot be done in MF mode */ - if (!IS_MF(bp)) - start = 0; - else - start = 4; - for (i = 0, j = start; j < (start + BNX2X_NUM_TESTS(bp)); - i++, j++) { - offset = sprintf(buf+32*i, "%s", - bnx2x_tests_str_arr[j]); - *(buf+offset) = '\0'; - } + memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr)); break; } } @@ -2516,7 +2423,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev, if (is_multi(bp)) { for_each_eth_queue(bp, i) { - hw_stats = (u32 *)&bp->fp_stats[i].eth_q_stats; + hw_stats = (u32 *)&bp->fp[i].eth_q_stats; for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { if (bnx2x_q_stats_arr[j].size == 0) { /* skip this counter */ @@ -2600,41 +2507,6 @@ static int bnx2x_set_phys_id(struct net_device *dev, return 0; } -static int bnx2x_get_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) -{ - - switch (info->flow_type) { - case TCP_V4_FLOW: - case TCP_V6_FLOW: - info->data = RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3; - break; - case UDP_V4_FLOW: - if (bp->rss_conf_obj.udp_rss_v4) - info->data = RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3; - else - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - case UDP_V6_FLOW: - if (bp->rss_conf_obj.udp_rss_v6) - info->data = RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3; - else - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - case IPV4_FLOW: - case IPV6_FLOW: - info->data = RXH_IP_SRC | RXH_IP_DST; - break; - default: - info->data = 0; - break; - } - - return 0; -} - static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rules __always_unused) { @@ -2644,102 +2516,7 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, case ETHTOOL_GRXRINGS: info->data = BNX2X_NUM_ETH_QUEUES(bp); return 0; - case ETHTOOL_GRXFH: - return bnx2x_get_rss_flags(bp, info); - default: - DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); - return -EOPNOTSUPP; - } -} - -static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info) -{ - int udp_rss_requested; - - DP(BNX2X_MSG_ETHTOOL, - "Set rss flags command parameters: flow type = %d, data = %llu\n", - info->flow_type, info->data); - - switch (info->flow_type) { - case TCP_V4_FLOW: - case TCP_V6_FLOW: - /* For TCP only 4-tupple hash is supported */ - if (info->data ^ (RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3)) { - DP(BNX2X_MSG_ETHTOOL, - "Command parameters not supported\n"); - return -EINVAL; - } else { - return 0; - } - - case UDP_V4_FLOW: - 
case UDP_V6_FLOW: - /* For UDP either 2-tupple hash or 4-tupple hash is supported */ - if (info->data == (RXH_IP_SRC | RXH_IP_DST | - RXH_L4_B_0_1 | RXH_L4_B_2_3)) - udp_rss_requested = 1; - else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) - udp_rss_requested = 0; - else - return -EINVAL; - if ((info->flow_type == UDP_V4_FLOW) && - (bp->rss_conf_obj.udp_rss_v4 != udp_rss_requested)) { - bp->rss_conf_obj.udp_rss_v4 = udp_rss_requested; - DP(BNX2X_MSG_ETHTOOL, - "rss re-configured, UDP 4-tupple %s\n", - udp_rss_requested ? "enabled" : "disabled"); - return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0); - } else if ((info->flow_type == UDP_V6_FLOW) && - (bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) { - bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested; - return bnx2x_config_rss_pf(bp, &bp->rss_conf_obj, 0); - DP(BNX2X_MSG_ETHTOOL, - "rss re-configured, UDP 4-tupple %s\n", - udp_rss_requested ? "enabled" : "disabled"); - } else { - return 0; - } - case IPV4_FLOW: - case IPV6_FLOW: - /* For IP only 2-tupple hash is supported */ - if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) { - DP(BNX2X_MSG_ETHTOOL, - "Command parameters not supported\n"); - return -EINVAL; - } else { - return 0; - } - case SCTP_V4_FLOW: - case AH_ESP_V4_FLOW: - case AH_V4_FLOW: - case ESP_V4_FLOW: - case SCTP_V6_FLOW: - case AH_ESP_V6_FLOW: - case AH_V6_FLOW: - case ESP_V6_FLOW: - case IP_USER_FLOW: - case ETHER_FLOW: - /* RSS is not supported for these protocols */ - if (info->data) { - DP(BNX2X_MSG_ETHTOOL, - "Command parameters not supported\n"); - return -EINVAL; - } else { - return 0; - } - default: - return -EINVAL; - } -} -static int bnx2x_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info) -{ - struct bnx2x *bp = netdev_priv(dev); - - switch (info->cmd) { - case ETHTOOL_SRXFH: - return bnx2x_set_rss_flags(bp, info); default: DP(BNX2X_MSG_ETHTOOL, "Command parameters not supported\n"); return -EOPNOTSUPP; @@ -2779,6 +2556,7 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir) { struct bnx2x *bp = netdev_priv(dev); size_t i; + u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0}; for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { /* @@ -2790,88 +2568,10 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir) * align the received table to the Client ID of the leading RSS * queue */ - bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id; + ind_table[i] = indir[i] + bp->fp->cl_id; } - return bnx2x_config_rss_eth(bp, false); -} - -/** - * bnx2x_get_channels - gets the number of RSS queues. - * - * @dev: net device - * @channels: returns the number of max / current queues - */ -static void bnx2x_get_channels(struct net_device *dev, - struct ethtool_channels *channels) -{ - struct bnx2x *bp = netdev_priv(dev); - - channels->max_combined = BNX2X_MAX_RSS_COUNT(bp); - channels->combined_count = BNX2X_NUM_ETH_QUEUES(bp); -} - -/** - * bnx2x_change_num_queues - change the number of RSS queues. - * - * @bp: bnx2x private structure - * - * Re-configure interrupt mode to get the new number of MSI-X - * vectors and re-add NAPI objects. - */ -static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss) -{ - bnx2x_del_all_napi(bp); - bnx2x_disable_msi(bp); - BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE; - bnx2x_set_int_mode(bp); - bnx2x_add_all_napi(bp); -} - -/** - * bnx2x_set_channels - sets the number of RSS queues. 
- * - * @dev: net device - * @channels: includes the number of queues requested - */ -static int bnx2x_set_channels(struct net_device *dev, - struct ethtool_channels *channels) -{ - struct bnx2x *bp = netdev_priv(dev); - - - DP(BNX2X_MSG_ETHTOOL, - "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n", - channels->rx_count, channels->tx_count, channels->other_count, - channels->combined_count); - - /* We don't support separate rx / tx channels. - * We don't allow setting 'other' channels. - */ - if (channels->rx_count || channels->tx_count || channels->other_count - || (channels->combined_count == 0) || - (channels->combined_count > BNX2X_MAX_RSS_COUNT(bp))) { - DP(BNX2X_MSG_ETHTOOL, "command parameters not supported\n"); - return -EINVAL; - } - - /* Check if there was a change in the active parameters */ - if (channels->combined_count == BNX2X_NUM_ETH_QUEUES(bp)) { - DP(BNX2X_MSG_ETHTOOL, "No change in active parameters\n"); - return 0; - } - - /* Set the requested number of queues in bp context. - * Note that the actual number of queues created during load may be - * less than requested if memory is low. - */ - if (unlikely(!netif_running(dev))) { - bnx2x_change_num_queues(bp, channels->combined_count); - return 0; - } - bnx2x_nic_unload(bp, UNLOAD_NORMAL); - bnx2x_change_num_queues(bp, channels->combined_count); - return bnx2x_nic_load(bp, LOAD_NORMAL); + return bnx2x_config_rss_eth(bp, ind_table, false); } static const struct ethtool_ops bnx2x_ethtool_ops = { @@ -2901,12 +2601,9 @@ static const struct ethtool_ops bnx2x_ethtool_ops = { .set_phys_id = bnx2x_set_phys_id, .get_ethtool_stats = bnx2x_get_ethtool_stats, .get_rxnfc = bnx2x_get_rxnfc, - .set_rxnfc = bnx2x_set_rxnfc, .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size, .get_rxfh_indir = bnx2x_get_rxfh_indir, .set_rxfh_indir = bnx2x_set_rxfh_indir, - .get_channels = bnx2x_get_channels, - .set_channels = bnx2x_set_channels, .get_eee = bnx2x_get_eee, .set_eee = bnx2x_set_eee, }; diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 6b776309e0a1..c61aa37298a3 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1253,7 +1253,6 @@ struct drv_func_mb { #define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000 #define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000 - #define REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF 0x00070401 #define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 8ddc78e0d945..a622bb7bf21d 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -104,7 +104,7 @@ MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature"); #define INT_MODE_INTx 1 #define INT_MODE_MSI 2 -int int_mode; +static int int_mode; module_param(int_mode, int, 0); MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X " "(1 INT#x; 2 MSI)"); @@ -758,7 +758,7 @@ void bnx2x_panic_dump(struct bnx2x *bp) /* Tx */ for_each_cos_in_tx_queue(fp, cos) { - txdata = *fp->txdata_ptr[cos]; + txdata = fp->txdata[cos]; BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n", i, txdata.tx_pkt_prod, txdata.tx_pkt_cons, txdata.tx_bd_prod, @@ -876,7 +876,7 @@ void bnx2x_panic_dump(struct bnx2x *bp) for_each_tx_queue(bp, i) { struct bnx2x_fastpath *fp 
= &bp->fp[i]; for_each_cos_in_tx_queue(fp, cos) { - struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos]; + struct bnx2x_fp_txdata *txdata = &fp->txdata[cos]; start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10); end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245); @@ -1583,7 +1583,7 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe) int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data); int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data); enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX; - struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + struct bnx2x_queue_sp_obj *q_obj = &fp->q_obj; DP(BNX2X_MSG_SP, "fp %d cid %d got ramrod #%d state is %x type is %d\n", @@ -1710,7 +1710,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance) /* Handle Rx or Tx according to SB id */ prefetch(fp->rx_cons_sb); for_each_cos_in_tx_queue(fp, cos) - prefetch(fp->txdata_ptr[cos]->tx_cons_sb); + prefetch(fp->txdata[cos].tx_cons_sb); prefetch(&fp->sb_running_index[SM_RX_ID]); napi_schedule(&bnx2x_fp(bp, fp->index, napi)); status &= ~mask; @@ -2124,11 +2124,6 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode) } } - if (load_mode == LOAD_LOOPBACK_EXT) { - struct link_params *lp = &bp->link_params; - lp->loopback_mode = LOOPBACK_EXT; - } - rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars); bnx2x_release_phy_lock(bp); @@ -2921,7 +2916,7 @@ static void bnx2x_pf_tx_q_prep(struct bnx2x *bp, struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init, u8 cos) { - txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping; + txq_init->dscr_map = fp->txdata[cos].tx_desc_mapping; txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos; txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; txq_init->fw_sb_id = fp->fw_sb_id; @@ -3035,9 +3030,9 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp) memcpy(ether_stat->version, DRV_MODULE_VERSION, ETH_STAT_INFO_VERSION_LEN - 1); - bp->sp_objs[0].mac_obj.get_n_elements(bp, &bp->sp_objs[0].mac_obj, - DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, - ether_stat->mac_local); + bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj, + DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED, + ether_stat->mac_local); ether_stat->mtu_size = bp->dev->mtu; @@ -3068,11 +3063,11 @@ static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp) /* insert FCoE stats from ramrod response */ if (!NO_FCOE(bp)) { struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = - &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. + &bp->fw_stats_data->queue_stats[FCOE_IDX]. tstorm_queue_statistics; struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = - &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]. + &bp->fw_stats_data->queue_stats[FCOE_IDX]. 
xstorm_queue_statistics; struct fcoe_statistics_params *fw_fcoe_stat = @@ -4628,11 +4623,11 @@ static void bnx2x_handle_classification_eqe(struct bnx2x *bp, case BNX2X_FILTER_MAC_PENDING: DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n"); #ifdef BCM_CNIC - if (cid == BNX2X_ISCSI_ETH_CID(bp)) + if (cid == BNX2X_ISCSI_ETH_CID) vlan_mac_obj = &bp->iscsi_l2_mac_obj; else #endif - vlan_mac_obj = &bp->sp_objs[cid].mac_obj; + vlan_mac_obj = &bp->fp[cid].mac_obj; break; case BNX2X_FILTER_MCAST_PENDING: @@ -4730,7 +4725,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp) for_each_eth_queue(bp, q) { /* Set the appropriate Queue object */ fp = &bp->fp[q]; - queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + queue_params.q_obj = &fp->q_obj; /* send the ramrod */ rc = bnx2x_queue_state_change(bp, &queue_params); @@ -4741,8 +4736,8 @@ static void bnx2x_after_function_update(struct bnx2x *bp) #ifdef BCM_CNIC if (!NO_FCOE(bp)) { - fp = &bp->fp[FCOE_IDX(bp)]; - queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + fp = &bp->fp[FCOE_IDX]; + queue_params.q_obj = &fp->q_obj; /* clear pending completion bit */ __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags); @@ -4774,11 +4769,11 @@ static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj( { DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid); #ifdef BCM_CNIC - if (cid == BNX2X_FCOE_ETH_CID(bp)) - return &bnx2x_fcoe_sp_obj(bp, q_obj); + if (cid == BNX2X_FCOE_ETH_CID) + return &bnx2x_fcoe(bp, q_obj); else #endif - return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj; + return &bnx2x_fp(bp, CID_TO_FP(cid), q_obj); } static void bnx2x_eq_int(struct bnx2x *bp) @@ -5660,15 +5655,15 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx) /* init tx data */ for_each_cos_in_tx_queue(fp, cos) { - bnx2x_init_txdata(bp, fp->txdata_ptr[cos], - CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp), - FP_COS_TO_TXQ(fp, cos, bp), - BNX2X_TX_SB_INDEX_BASE + cos, fp); - cids[cos] = fp->txdata_ptr[cos]->cid; + bnx2x_init_txdata(bp, &fp->txdata[cos], + CID_COS_TO_TX_ONLY_CID(fp->cid, cos), + FP_COS_TO_TXQ(fp, cos), + BNX2X_TX_SB_INDEX_BASE + cos); + cids[cos] = fp->txdata[cos].cid; } - bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids, - fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata), + bnx2x_init_queue_obj(bp, &fp->q_obj, fp->cl_id, cids, fp->max_cos, + BP_FUNC(bp), bnx2x_sp(bp, q_rdata), bnx2x_sp_mapping(bp, q_rdata), q_type); /** @@ -5719,7 +5714,7 @@ static void bnx2x_init_tx_rings(struct bnx2x *bp) for_each_tx_queue(bp, i) for_each_cos_in_tx_queue(&bp->fp[i], cos) - bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]); + bnx2x_init_tx_ring_one(&bp->fp[i].txdata[cos]); } void bnx2x_nic_init(struct bnx2x *bp, u32 load_code) @@ -7068,10 +7063,12 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; for (i = 0; i < L2_ILT_LINES(bp); i++) { - ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt; + ilt->lines[cdu_ilt_start + i].page = + bp->context.vcxt + (ILT_PAGE_CIDS * i); ilt->lines[cdu_ilt_start + i].page_mapping = - bp->context[i].cxt_mapping; - ilt->lines[cdu_ilt_start + i].size = bp->context[i].size; + bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i); + /* cdu ilt pages are allocated manually so there's no need to + set the size */ } bnx2x_ilt_init_op(bp, INITOP_SET); @@ -7338,8 +7335,6 @@ static int bnx2x_init_hw_func(struct bnx2x *bp) void bnx2x_free_mem(struct bnx2x *bp) { - int i; - /* fastpath */ bnx2x_free_fp_mem(bp); /* end of fastpath */ @@ -7353,9 +7348,9 @@ void bnx2x_free_mem(struct bnx2x 
*bp) BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, sizeof(struct bnx2x_slowpath)); - for (i = 0; i < L2_ILT_LINES(bp); i++) - BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping, - bp->context[i].size); + BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping, + bp->context.size); + bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE); BNX2X_FREE(bp->ilt->lines); @@ -7441,8 +7436,6 @@ static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp) int bnx2x_alloc_mem(struct bnx2x *bp) { - int i, allocated, context_size; - #ifdef BCM_CNIC if (!CHIP_IS_E1x(bp)) /* size = the status block + ramrod buffers */ @@ -7472,29 +7465,11 @@ int bnx2x_alloc_mem(struct bnx2x *bp) if (bnx2x_alloc_fw_stats_mem(bp)) goto alloc_mem_err; - /* Allocate memory for CDU context: - * This memory is allocated separately and not in the generic ILT - * functions because CDU differs in few aspects: - * 1. There are multiple entities allocating memory for context - - * 'regular' driver, CNIC and SRIOV driver. Each separately controls - * its own ILT lines. - * 2. Since CDU page-size is not a single 4KB page (which is the case - * for the other ILT clients), to be efficient we want to support - * allocation of sub-page-size in the last entry. - * 3. Context pointers are used by the driver to pass to FW / update - * the context (for the other ILT clients the pointers are used just to - * free the memory during unload). - */ - context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp); + bp->context.size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp); + + BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping, + bp->context.size); - for (i = 0, allocated = 0; allocated < context_size; i++) { - bp->context[i].size = min(CDU_ILT_PAGE_SZ, - (context_size - allocated)); - BNX2X_PCI_ALLOC(bp->context[i].vcxt, - &bp->context[i].cxt_mapping, - bp->context[i].size); - allocated += bp->context[i].size; - } BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES); if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC)) @@ -7596,8 +7571,8 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set) __set_bit(RAMROD_COMP_WAIT, &ramrod_flags); /* Eth MAC is set on RSS leading client (fp[0]) */ - return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->sp_objs->mac_obj, - set, BNX2X_ETH_MAC, &ramrod_flags); + return bnx2x_set_mac_one(bp, bp->dev->dev_addr, &bp->fp->mac_obj, set, + BNX2X_ETH_MAC, &ramrod_flags); } int bnx2x_setup_leading(struct bnx2x *bp) @@ -7612,7 +7587,7 @@ int bnx2x_setup_leading(struct bnx2x *bp) * * In case of MSI-X it will also try to enable MSI-X. 
*/ -void bnx2x_set_int_mode(struct bnx2x *bp) +static void __devinit bnx2x_set_int_mode(struct bnx2x *bp) { switch (int_mode) { case INT_MODE_MSI: @@ -7623,6 +7598,11 @@ void bnx2x_set_int_mode(struct bnx2x *bp) BNX2X_DEV_INFO("set number of queues to 1\n"); break; default: + /* Set number of queues for MSI-X mode */ + bnx2x_set_num_queues(bp); + + BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues); + /* if we can't use MSI-X we only need one fp, * so try to enable MSI-X with the requested number of fp's * and fallback to MSI or legacy INTx with one fp @@ -7763,8 +7743,6 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp, { u8 cos; - int cxt_index, cxt_offset; - /* FCoE Queue uses Default SB, thus has no HC capabilities */ if (!IS_FCOE_FP(fp)) { __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags); @@ -7801,13 +7779,9 @@ static void bnx2x_pf_q_prep_init(struct bnx2x *bp, fp->index, init_params->max_cos); /* set the context pointers queue object */ - for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) { - cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS; - cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index * - ILT_PAGE_CIDS); + for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) init_params->cxts[cos] = - &bp->context[cxt_index].vcxt[cxt_offset].eth; - } + &bp->context.vcxt[fp->txdata[cos].cid].eth; } int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp, @@ -7872,7 +7846,7 @@ int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp, bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0); - q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + q_params.q_obj = &fp->q_obj; /* We want to wait for completion in this context */ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); @@ -7945,7 +7919,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index) DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid); - q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj; + q_params.q_obj = &fp->q_obj; /* We want to wait for completion in this context */ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags); @@ -7956,7 +7930,7 @@ static int bnx2x_stop_queue(struct bnx2x *bp, int index) tx_index++){ /* ascertain this is a normal queue*/ - txdata = fp->txdata_ptr[tx_index]; + txdata = &fp->txdata[tx_index]; DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n", txdata->txq_index); @@ -8323,7 +8297,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) struct bnx2x_fastpath *fp = &bp->fp[i]; for_each_cos_in_tx_queue(fp, cos) - rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]); + rc = bnx2x_clean_tx_queue(bp, &fp->txdata[cos]); #ifdef BNX2X_STOP_ON_ERROR if (rc) return; @@ -8334,13 +8308,12 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) usleep_range(1000, 1000); /* Clean all ETH MACs */ - rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC, - false); + rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_ETH_MAC, false); if (rc < 0) BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc); /* Clean up UC list */ - rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC, + rc = bnx2x_del_all_macs(bp, &bp->fp[0].mac_obj, BNX2X_UC_LIST_MAC, true); if (rc < 0) BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n", @@ -9732,8 +9705,6 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp) bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ? BC_SUPPORTS_PFC_STATS : 0; - bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? 
- BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; boot_mode = SHMEM_RD(bp, dev_info.port_feature_config[BP_PORT(bp)].mba_config) & PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; @@ -11047,7 +11018,7 @@ static int bnx2x_set_uc_list(struct bnx2x *bp) int rc; struct net_device *dev = bp->dev; struct netdev_hw_addr *ha; - struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj; + struct bnx2x_vlan_mac_obj *mac_obj = &bp->fp->mac_obj; unsigned long ramrod_flags = 0; /* First schedule a cleanup up of old configuration */ @@ -11722,7 +11693,7 @@ void bnx2x__init_func_obj(struct bnx2x *bp) /* must be called after sriov-enable */ static int bnx2x_set_qm_cid_count(struct bnx2x *bp) { - int cid_count = BNX2X_L2_MAX_CID(bp); + int cid_count = BNX2X_L2_CID_COUNT(bp); #ifdef BCM_CNIC cid_count += CNIC_CID_MAX; @@ -11767,7 +11738,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, struct bnx2x *bp; int pcie_width, pcie_speed; int rc, max_non_def_sbs; - int rx_count, tx_count, rss_count, doorbell_size; + int rx_count, tx_count, rss_count; /* * An estimated maximum supported CoS number according to the chip * version. @@ -11810,6 +11781,13 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev); + /* !!! FIXME !!! + * Do not allow the maximum SB count to grow above 16 + * since Special CIDs starts from 16*BNX2X_MULTI_TX_COS=48. + * We will use the FP_SB_MAX_E1x macro for this matter. + */ + max_non_def_sbs = min_t(int, FP_SB_MAX_E1x, max_non_def_sbs); + WARN_ON(!max_non_def_sbs); /* Maximum number of RSS queues: one IGU SB goes to CNIC */ @@ -11820,9 +11798,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, /* * Maximum number of netdev Tx queues: - * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 + * Maximum TSS queues * Maximum supported number of CoS + FCoE L2 */ - tx_count = rss_count * max_cos_est + FCOE_PRESENT; + tx_count = MAX_TXQS_PER_COS * max_cos_est + FCOE_PRESENT; /* dev zeroed in init_etherdev */ dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count); @@ -11831,6 +11809,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, bp = netdev_priv(dev); + BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n", + tx_count, rx_count); + bp->igu_sb_cnt = max_non_def_sbs; bp->msg_enable = debug; pci_set_drvdata(pdev, dev); @@ -11843,9 +11824,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, BNX2X_DEV_INFO("max_non_def_sbs %d\n", max_non_def_sbs); - BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n", - tx_count, rx_count); - rc = bnx2x_init_bp(bp); if (rc) goto init_one_exit; @@ -11854,15 +11832,9 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, * Map doorbels here as we need the real value of bp->max_cos which * is initialized in bnx2x_init_bp(). */ - doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT); - if (doorbell_size > pci_resource_len(pdev, 2)) { - dev_err(&bp->pdev->dev, - "Cannot map doorbells, bar size too small, aborting\n"); - rc = -ENOMEM; - goto init_one_exit; - } bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2), - doorbell_size); + min_t(u64, BNX2X_DB_SIZE(bp), + pci_resource_len(pdev, 2))); if (!bp->doorbells) { dev_err(&bp->pdev->dev, "Cannot map doorbell space, aborting\n"); @@ -11880,12 +11852,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, #endif - - /* Set bp->num_queues for MSI-X mode*/ - bnx2x_set_num_queues(bp); - /* Configure interrupt mode: try to enable MSI-X/MSI if - * needed. 
+ * needed, set bp->num_queues appropriately. */ bnx2x_set_int_mode(bp); @@ -12229,7 +12197,6 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp) static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) { struct eth_spe *spe; - int cxt_index, cxt_offset; #ifdef BNX2X_STOP_ON_ERROR if (unlikely(bp->panic)) @@ -12252,16 +12219,10 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count) * ramrod */ if (type == ETH_CONNECTION_TYPE) { - if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) { - cxt_index = BNX2X_ISCSI_ETH_CID(bp) / - ILT_PAGE_CIDS; - cxt_offset = BNX2X_ISCSI_ETH_CID(bp) - - (cxt_index * ILT_PAGE_CIDS); - bnx2x_set_ctx_validation(bp, - &bp->context[cxt_index]. - vcxt[cxt_offset].eth, - BNX2X_ISCSI_ETH_CID(bp)); - } + if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) + bnx2x_set_ctx_validation(bp, &bp->context. + vcxt[BNX2X_ISCSI_ETH_CID].eth, + BNX2X_ISCSI_ETH_CID); } /* @@ -12614,21 +12575,6 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp) cp->num_irq = 2; } -void bnx2x_setup_cnic_info(struct bnx2x *bp) -{ - struct cnic_eth_dev *cp = &bp->cnic_eth_dev; - - - cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + - bnx2x_cid_ilt_lines(bp); - cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS; - cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); - cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); - - if (NO_ISCSI_OOO(bp)) - cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; -} - static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops, void *data) { @@ -12707,10 +12653,10 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev) cp->drv_ctl = bnx2x_drv_ctl; cp->drv_register_cnic = bnx2x_register_cnic; cp->drv_unregister_cnic = bnx2x_unregister_cnic; - cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp); + cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID; cp->iscsi_l2_client_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX); - cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp); + cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; if (NO_ISCSI_OOO(bp)) cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO; diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h index a78e35683b03..bfef98f666c9 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h @@ -5913,7 +5913,6 @@ #define MISC_REGISTERS_SPIO_OUTPUT_LOW 0 #define MISC_REGISTERS_SPIO_SET_POS 8 #define HW_LOCK_MAX_RESOURCE_VALUE 31 -#define HW_LOCK_RESOURCE_DCBX_ADMIN_MIB 13 #define HW_LOCK_RESOURCE_DRV_FLAGS 10 #define HW_LOCK_RESOURCE_GPIO 1 #define HW_LOCK_RESOURCE_MDIO 0 diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 734fd87cd990..6c14b4a4e82c 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -4107,10 +4107,6 @@ static int bnx2x_setup_rss(struct bnx2x *bp, data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY; - if (test_bit(BNX2X_RSS_IPV4_UDP, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV4_UDP_CAPABILITY; - if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags)) data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY; @@ -4119,10 +4115,6 @@ static int bnx2x_setup_rss(struct bnx2x *bp, data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY; - if (test_bit(BNX2X_RSS_IPV6_UDP, &p->rss_flags)) - data->capabilities |= - ETH_RSS_UPDATE_RAMROD_DATA_IPV6_UDP_CAPABILITY; - /* Hashing mask */ data->rss_result_mask = 
p->rss_result_mask; diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index 76818ef08f9b..efd80bdd0dfe 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -694,10 +694,8 @@ enum { BNX2X_RSS_IPV4, BNX2X_RSS_IPV4_TCP, - BNX2X_RSS_IPV4_UDP, BNX2X_RSS_IPV6, BNX2X_RSS_IPV6_TCP, - BNX2X_RSS_IPV6_UDP, }; struct bnx2x_config_rss_params { @@ -731,10 +729,6 @@ struct bnx2x_rss_config_obj { /* Last configured indirection table */ u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; - /* flags for enabling 4-tupple hash on UDP */ - u8 udp_rss_v4; - u8 udp_rss_v6; - int (*config_rss)(struct bnx2x *bp, struct bnx2x_config_rss_params *p); }; diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 514a528f6ddf..0e8bdcb9c748 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -859,22 +859,17 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp) struct tstorm_per_queue_stats *tclient = &bp->fw_stats_data->queue_stats[i]. tstorm_queue_statistics; - struct tstorm_per_queue_stats *old_tclient = - &bnx2x_fp_stats(bp, fp)->old_tclient; + struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient; struct ustorm_per_queue_stats *uclient = &bp->fw_stats_data->queue_stats[i]. ustorm_queue_statistics; - struct ustorm_per_queue_stats *old_uclient = - &bnx2x_fp_stats(bp, fp)->old_uclient; + struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient; struct xstorm_per_queue_stats *xclient = &bp->fw_stats_data->queue_stats[i]. xstorm_queue_statistics; - struct xstorm_per_queue_stats *old_xclient = - &bnx2x_fp_stats(bp, fp)->old_xclient; - struct bnx2x_eth_q_stats *qstats = - &bnx2x_fp_stats(bp, fp)->eth_q_stats; - struct bnx2x_eth_q_stats_old *qstats_old = - &bnx2x_fp_stats(bp, fp)->eth_q_stats_old; + struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient; + struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old; u32 diff; @@ -1057,11 +1052,8 @@ static void bnx2x_net_stats_update(struct bnx2x *bp) nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); tmp = estats->mac_discard; - for_each_rx_queue(bp, i) { - struct tstorm_per_queue_stats *old_tclient = - &bp->fp_stats[i].old_tclient; - tmp += le32_to_cpu(old_tclient->checksum_discard); - } + for_each_rx_queue(bp, i) + tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); nstats->rx_dropped = tmp + bp->net_stats_old.rx_dropped; nstats->tx_dropped = 0; @@ -1111,9 +1103,9 @@ static void bnx2x_drv_stats_update(struct bnx2x *bp) int i; for_each_queue(bp, i) { - struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats; + struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats; struct bnx2x_eth_q_stats_old *qstats_old = - &bp->fp_stats[i].eth_q_stats_old; + &bp->fp[i].eth_q_stats_old; UPDATE_ESTAT_QSTAT(driver_xoff); UPDATE_ESTAT_QSTAT(rx_err_discard_pkt); @@ -1440,7 +1432,7 @@ static void bnx2x_prep_fw_stats_req(struct bnx2x *bp) query[first_queue_query_index + i]; cur_query_entry->kind = STATS_TYPE_QUEUE; - cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX(bp)]); + cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]); cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); @@ -1491,19 +1483,15 @@ void 
bnx2x_stats_init(struct bnx2x *bp) /* function stats */ for_each_queue(bp, i) { - struct bnx2x_fp_stats *fp_stats = &bp->fp_stats[i]; - - memset(&fp_stats->old_tclient, 0, - sizeof(fp_stats->old_tclient)); - memset(&fp_stats->old_uclient, 0, - sizeof(fp_stats->old_uclient)); - memset(&fp_stats->old_xclient, 0, - sizeof(fp_stats->old_xclient)); + struct bnx2x_fastpath *fp = &bp->fp[i]; + + memset(&fp->old_tclient, 0, sizeof(fp->old_tclient)); + memset(&fp->old_uclient, 0, sizeof(fp->old_uclient)); + memset(&fp->old_xclient, 0, sizeof(fp->old_xclient)); if (bp->stats_init) { - memset(&fp_stats->eth_q_stats, 0, - sizeof(fp_stats->eth_q_stats)); - memset(&fp_stats->eth_q_stats_old, 0, - sizeof(fp_stats->eth_q_stats_old)); + memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats)); + memset(&fp->eth_q_stats_old, 0, + sizeof(fp->eth_q_stats_old)); } } @@ -1545,10 +1533,8 @@ void bnx2x_save_statistics(struct bnx2x *bp) /* save queue statistics */ for_each_eth_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; - struct bnx2x_eth_q_stats *qstats = - &bnx2x_fp_stats(bp, fp)->eth_q_stats; - struct bnx2x_eth_q_stats_old *qstats_old = - &bnx2x_fp_stats(bp, fp)->eth_q_stats_old; + struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; + struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old; UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi); UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo); @@ -1587,7 +1573,7 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats; struct bnx2x_eth_stats *estats = &bp->eth_stats; struct per_queue_stats *fcoe_q_stats = - &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)]; + &bp->fw_stats_data->queue_stats[FCOE_IDX]; struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = &fcoe_q_stats->tstorm_queue_statistics; @@ -1604,7 +1590,8 @@ void bnx2x_afex_collect_stats(struct bnx2x *bp, void *void_afex_stats, memset(afex_stats, 0, sizeof(struct afex_stats)); for_each_eth_queue(bp, i) { - struct bnx2x_eth_q_stats *qstats = &bp->fp_stats[i].eth_q_stats; + struct bnx2x_fastpath *fp = &bp->fp[i]; + struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; ADD_64(afex_stats->rx_unicast_bytes_hi, qstats->total_unicast_bytes_received_hi, diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/trunk/drivers/net/wireless/ath/ath9k/ar9003_mac.c index 78816b8b2173..d9e0824af093 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mac.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_mac.c @@ -181,14 +181,11 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) u32 mask2 = 0; struct ath9k_hw_capabilities *pCap = &ah->caps; struct ath_common *common = ath9k_hw_common(ah); - u32 sync_cause = 0, async_cause, async_mask = AR_INTR_MAC_IRQ; - - if (ath9k_hw_mci_is_enabled(ah)) - async_mask |= AR_INTR_ASYNC_MASK_MCI; + u32 sync_cause = 0, async_cause; async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE); - if (async_cause & async_mask) { + if (async_cause & (AR_INTR_MAC_IRQ | AR_INTR_ASYNC_MASK_MCI)) { if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M) == AR_RTC_STATUS_ON) isr = REG_READ(ah, AR_ISR); diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.c index cc2853ade8f8..b1ced2a76da3 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.c @@ -321,7 +321,7 @@ void ar9003_mci_set_full_sleep(struct ath_hw *ah) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - if 
(ar9003_mci_state(ah, MCI_STATE_ENABLE) && + if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) && (mci->bt_state != MCI_BT_SLEEP) && !mci->halted_bt_gpm) { ar9003_mci_send_coex_halt_bt_gpm(ah, true, true); @@ -484,7 +484,7 @@ static void ar9003_mci_sync_bt_state(struct ath_hw *ah) struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; u32 cur_bt_state; - cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP); + cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL); if (mci->bt_state != cur_bt_state) mci->bt_state = cur_bt_state; @@ -593,7 +593,8 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type, if (!time_out) break; - offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data); + offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET, + &more_data); if (offset == MCI_GPM_INVALID) continue; @@ -657,7 +658,8 @@ static u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type, time_out = 0; while (more_data == MCI_GPM_MORE) { - offset = ar9003_mci_get_next_gpm_offset(ah, false, &more_data); + offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET, + &more_data); if (offset == MCI_GPM_INVALID) break; @@ -891,16 +893,13 @@ void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, udelay(100); } - /* Check pending GPM msg before MCI Reset Rx */ - ar9003_mci_check_gpm_offset(ah); - regval |= SM(1, AR_MCI_COMMAND2_RESET_RX); REG_WRITE(ah, AR_MCI_COMMAND2, regval); udelay(1); regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX); REG_WRITE(ah, AR_MCI_COMMAND2, regval); - ar9003_mci_get_next_gpm_offset(ah, true, NULL); + ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL); REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) | @@ -1011,32 +1010,38 @@ static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header, } } -void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force) +void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - if (!mci->update_2g5g && !force) + if (!mci->update_2g5g) return; if (mci->is_2g) { ar9003_mci_send_2g5g_status(ah, true); + ar9003_mci_send_lna_transfer(ah, true); + udelay(5); - REG_SET_BIT(ah, AR_MCI_TX_CTRL, + REG_CLR_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL, AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL); if (!(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) - ar9003_mci_osla_setup(ah, true); + REG_SET_BIT(ah, AR_BTCOEX_CTRL, + AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); } else { + ar9003_mci_send_lna_take(ah, true); + udelay(5); + REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE); REG_SET_BIT(ah, AR_PHY_GLB_CONTROL, AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL); + REG_CLR_BIT(ah, AR_BTCOEX_CTRL, + AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN); - ar9003_mci_osla_setup(ah, false); - if (!force) - ar9003_mci_send_2g5g_status(ah, true); + ar9003_mci_send_2g5g_status(ah, true); } } @@ -1164,10 +1169,11 @@ void ar9003_mci_cleanup(struct ath_hw *ah) } EXPORT_SYMBOL(ar9003_mci_cleanup); -u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type) +u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data) { + struct ath_common *common = ath9k_hw_common(ah); struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u32 value = 0; + u32 value = 0, more_gpm = 0, gpm_ptr; u8 query_type; switch (state_type) { @@ -1179,6 +1185,81 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type) value = 0; } value &= AR_BTCOEX_CTRL_MCI_MODE_EN; + break; + case MCI_STATE_INIT_GPM_OFFSET: + value = MS(REG_READ(ah, 
AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR); + mci->gpm_idx = value; + break; + case MCI_STATE_NEXT_GPM_OFFSET: + case MCI_STATE_LAST_GPM_OFFSET: + /* + * This could be useful to avoid new GPM message interrupt which + * may lead to spurious interrupt after power sleep, or multiple + * entry of ath_mci_intr(). + * Adding empty GPM check by returning HAL_MCI_GPM_INVALID can + * alleviate this effect, but clearing GPM RX interrupt bit is + * safe, because whether this is called from hw or driver code + * there must be an interrupt bit set/triggered initially + */ + REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, + AR_MCI_INTERRUPT_RX_MSG_GPM); + + gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR); + value = gpm_ptr; + + if (value == 0) + value = mci->gpm_len - 1; + else if (value >= mci->gpm_len) { + if (value != 0xFFFF) + value = 0; + } else { + value--; + } + + if (value == 0xFFFF) { + value = MCI_GPM_INVALID; + more_gpm = MCI_GPM_NOMORE; + } else if (state_type == MCI_STATE_NEXT_GPM_OFFSET) { + if (gpm_ptr == mci->gpm_idx) { + value = MCI_GPM_INVALID; + more_gpm = MCI_GPM_NOMORE; + } else { + for (;;) { + u32 temp_index; + + /* skip reserved GPM if any */ + + if (value != mci->gpm_idx) + more_gpm = MCI_GPM_MORE; + else + more_gpm = MCI_GPM_NOMORE; + + temp_index = mci->gpm_idx; + mci->gpm_idx++; + + if (mci->gpm_idx >= + mci->gpm_len) + mci->gpm_idx = 0; + + if (ar9003_mci_is_gpm_valid(ah, + temp_index)) { + value = temp_index; + break; + } + + if (more_gpm == MCI_GPM_NOMORE) { + value = MCI_GPM_INVALID; + break; + } + } + } + if (p_data) + *p_data = more_gpm; + } + + if (value != MCI_GPM_INVALID) + value <<= 4; + break; case MCI_STATE_LAST_SCHD_MSG_OFFSET: value = MS(REG_READ(ah, AR_MCI_RX_STATUS), @@ -1191,6 +1272,21 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type) AR_MCI_RX_REMOTE_SLEEP) ? 
MCI_BT_SLEEP : MCI_BT_AWAKE; break; + case MCI_STATE_CONT_RSSI_POWER: + value = MS(mci->cont_status, AR_MCI_CONT_RSSI_POWER); + break; + case MCI_STATE_CONT_PRIORITY: + value = MS(mci->cont_status, AR_MCI_CONT_RRIORITY); + break; + case MCI_STATE_CONT_TXRX: + value = MS(mci->cont_status, AR_MCI_CONT_TXRX); + break; + case MCI_STATE_BT: + value = mci->bt_state; + break; + case MCI_STATE_SET_BT_SLEEP: + mci->bt_state = MCI_BT_SLEEP; + break; case MCI_STATE_SET_BT_AWAKE: mci->bt_state = MCI_BT_AWAKE; ar9003_mci_send_coex_version_query(ah, true); @@ -1199,7 +1295,7 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type) if (mci->unhalt_bt_gpm) ar9003_mci_send_coex_halt_bt_gpm(ah, false, true); - ar9003_mci_2g5g_switch(ah, false); + ar9003_mci_2g5g_switch(ah, true); break; case MCI_STATE_SET_BT_CAL_START: mci->bt_state = MCI_BT_CAL_START; @@ -1223,6 +1319,34 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type) case MCI_STATE_SEND_WLAN_COEX_VERSION: ar9003_mci_send_coex_version_response(ah, true); break; + case MCI_STATE_SET_BT_COEX_VERSION: + if (!p_data) + ath_dbg(common, MCI, + "MCI Set BT Coex version with NULL data!!\n"); + else { + mci->bt_ver_major = (*p_data >> 8) & 0xff; + mci->bt_ver_minor = (*p_data) & 0xff; + mci->bt_version_known = true; + ath_dbg(common, MCI, "MCI BT version set: %d.%d\n", + mci->bt_ver_major, mci->bt_ver_minor); + } + break; + case MCI_STATE_SEND_WLAN_CHANNELS: + if (p_data) { + if (((mci->wlan_channels[1] & 0xffff0000) == + (*(p_data + 1) & 0xffff0000)) && + (mci->wlan_channels[2] == *(p_data + 2)) && + (mci->wlan_channels[3] == *(p_data + 3))) + break; + + mci->wlan_channels[0] = *p_data++; + mci->wlan_channels[1] = *p_data++; + mci->wlan_channels[2] = *p_data++; + mci->wlan_channels[3] = *p_data++; + } + mci->wlan_channels_update = true; + ar9003_mci_send_coex_wlan_channels(ah, true); + break; case MCI_STATE_SEND_VERSION_QUERY: ar9003_mci_send_coex_version_query(ah, true); break; @@ -1230,12 +1354,29 @@ u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type) query_type = MCI_GPM_COEX_QUERY_BT_TOPOLOGY; ar9003_mci_send_coex_bt_status_query(ah, true, query_type); break; + case MCI_STATE_NEED_FLUSH_BT_INFO: + /* + * btcoex_hw.mci.unhalt_bt_gpm means whether it's + * needed to send UNHALT message. It's set whenever + * there's a request to send HALT message. + * mci_halted_bt_gpm means whether HALT message is sent + * out successfully. + * + * Checking (mci_unhalt_bt_gpm == false) instead of + * checking (ah->mci_halted_bt_gpm == false) will make + * sure currently is in UNHALT-ed mode and BT can + * respond to status query. + */ + value = (!mci->unhalt_bt_gpm && mci->need_flush_btinfo) ? 1 : 0; + if (p_data) + mci->need_flush_btinfo = (*p_data != 0) ? 
true : false; + break; case MCI_STATE_RECOVER_RX: ar9003_mci_prep_interface(ah); mci->query_bt = true; mci->need_flush_btinfo = true; ar9003_mci_send_coex_wlan_channels(ah, true); - ar9003_mci_2g5g_switch(ah, false); + ar9003_mci_2g5g_switch(ah, true); break; case MCI_STATE_NEED_FTP_STOMP: value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP); @@ -1263,154 +1404,3 @@ void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah) /* Force another 2g5g update at next scanning */ mci->update_2g5g = true; } - -void ar9003_mci_set_power_awake(struct ath_hw *ah) -{ - u32 btcoex_ctrl2, diag_sw; - int i; - u8 lna_ctrl, bt_sleep; - - for (i = 0; i < AH_WAIT_TIMEOUT; i++) { - btcoex_ctrl2 = REG_READ(ah, AR_BTCOEX_CTRL2); - if (btcoex_ctrl2 != 0xdeadbeef) - break; - udelay(AH_TIME_QUANTUM); - } - REG_WRITE(ah, AR_BTCOEX_CTRL2, (btcoex_ctrl2 | BIT(23))); - - for (i = 0; i < AH_WAIT_TIMEOUT; i++) { - diag_sw = REG_READ(ah, AR_DIAG_SW); - if (diag_sw != 0xdeadbeef) - break; - udelay(AH_TIME_QUANTUM); - } - REG_WRITE(ah, AR_DIAG_SW, (diag_sw | BIT(27) | BIT(19) | BIT(18))); - lna_ctrl = REG_READ(ah, AR_OBS_BUS_CTRL) & 0x3; - bt_sleep = REG_READ(ah, AR_MCI_RX_STATUS) & AR_MCI_RX_REMOTE_SLEEP; - - REG_WRITE(ah, AR_BTCOEX_CTRL2, btcoex_ctrl2); - REG_WRITE(ah, AR_DIAG_SW, diag_sw); - - if (bt_sleep && (lna_ctrl == 2)) { - REG_SET_BIT(ah, AR_BTCOEX_RC, 0x1); - REG_CLR_BIT(ah, AR_BTCOEX_RC, 0x1); - udelay(50); - } -} - -void ar9003_mci_check_gpm_offset(struct ath_hw *ah) -{ - struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u32 offset; - - /* - * This should only be called before "MAC Warm Reset" or "MCI Reset Rx". - */ - offset = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR); - if (mci->gpm_idx == offset) - return; - ath_dbg(common, MCI, "GPM cached write pointer mismatch %d %d\n", - mci->gpm_idx, offset); - mci->query_bt = true; - mci->need_flush_btinfo = true; - mci->gpm_idx = 0; -} - -u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more) -{ - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u32 offset, more_gpm = 0, gpm_ptr; - - if (first) { - gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR); - mci->gpm_idx = gpm_ptr; - return gpm_ptr; - } - - /* - * This could be useful to avoid new GPM message interrupt which - * may lead to spurious interrupt after power sleep, or multiple - * entry of ath_mci_intr(). 
- * Adding empty GPM check by returning HAL_MCI_GPM_INVALID can - * alleviate this effect, but clearing GPM RX interrupt bit is - * safe, because whether this is called from hw or driver code - * there must be an interrupt bit set/triggered initially - */ - REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, - AR_MCI_INTERRUPT_RX_MSG_GPM); - - gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR); - offset = gpm_ptr; - - if (!offset) - offset = mci->gpm_len - 1; - else if (offset >= mci->gpm_len) { - if (offset != 0xFFFF) - offset = 0; - } else { - offset--; - } - - if ((offset == 0xFFFF) || (gpm_ptr == mci->gpm_idx)) { - offset = MCI_GPM_INVALID; - more_gpm = MCI_GPM_NOMORE; - goto out; - } - for (;;) { - u32 temp_index; - - /* skip reserved GPM if any */ - - if (offset != mci->gpm_idx) - more_gpm = MCI_GPM_MORE; - else - more_gpm = MCI_GPM_NOMORE; - - temp_index = mci->gpm_idx; - mci->gpm_idx++; - - if (mci->gpm_idx >= mci->gpm_len) - mci->gpm_idx = 0; - - if (ar9003_mci_is_gpm_valid(ah, temp_index)) { - offset = temp_index; - break; - } - - if (more_gpm == MCI_GPM_NOMORE) { - offset = MCI_GPM_INVALID; - break; - } - } - - if (offset != MCI_GPM_INVALID) - offset <<= 4; -out: - if (more) - *more = more_gpm; - - return offset; -} -EXPORT_SYMBOL(ar9003_mci_get_next_gpm_offset); - -void ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor) -{ - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - - mci->bt_ver_major = major; - mci->bt_ver_minor = minor; - mci->bt_version_known = true; - ath_dbg(ath9k_hw_common(ah), MCI, "MCI BT version set: %d.%d\n", - mci->bt_ver_major, mci->bt_ver_minor); -} -EXPORT_SYMBOL(ar9003_mci_set_bt_version); - -void ar9003_mci_send_wlan_channels(struct ath_hw *ah) -{ - struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - - mci->wlan_channels_update = true; - ar9003_mci_send_coex_wlan_channels(ah, true); -} -EXPORT_SYMBOL(ar9003_mci_send_wlan_channels); diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.h index d33b8e128855..10282e2bcdc9 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_mci.h @@ -189,15 +189,26 @@ enum mci_bt_state { /* Type of state query */ enum mci_state_type { MCI_STATE_ENABLE, + MCI_STATE_INIT_GPM_OFFSET, + MCI_STATE_NEXT_GPM_OFFSET, + MCI_STATE_LAST_GPM_OFFSET, + MCI_STATE_BT, + MCI_STATE_SET_BT_SLEEP, MCI_STATE_SET_BT_AWAKE, MCI_STATE_SET_BT_CAL_START, MCI_STATE_SET_BT_CAL, MCI_STATE_LAST_SCHD_MSG_OFFSET, MCI_STATE_REMOTE_SLEEP, + MCI_STATE_CONT_RSSI_POWER, + MCI_STATE_CONT_PRIORITY, + MCI_STATE_CONT_TXRX, MCI_STATE_RESET_REQ_WAKE, MCI_STATE_SEND_WLAN_COEX_VERSION, + MCI_STATE_SET_BT_COEX_VERSION, + MCI_STATE_SEND_WLAN_CHANNELS, MCI_STATE_SEND_VERSION_QUERY, MCI_STATE_SEND_STATUS_QUERY, + MCI_STATE_NEED_FLUSH_BT_INFO, MCI_STATE_SET_CONCUR_TX_PRI, MCI_STATE_RECOVER_RX, MCI_STATE_NEED_FTP_STOMP, @@ -248,15 +259,14 @@ enum mci_gpm_coex_opcode { bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag, u32 *payload, u8 len, bool wait_done, bool check_bt); -u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type); +u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data); void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf, u16 len, u32 sched_addr); void ar9003_mci_cleanup(struct ath_hw *ah); void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr, u32 *rx_msg_intr); -u32 ar9003_mci_get_next_gpm_offset(struct ath_hw *ah, bool first, u32 *more); -void 
ar9003_mci_set_bt_version(struct ath_hw *ah, u8 major, u8 minor); -void ar9003_mci_send_wlan_channels(struct ath_hw *ah); +void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah); + /* * These functions are used by ath9k_hw. */ @@ -267,7 +277,7 @@ void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep); void ar9003_mci_init_cal_req(struct ath_hw *ah, bool *is_reusable); void ar9003_mci_init_cal_done(struct ath_hw *ah); void ar9003_mci_set_full_sleep(struct ath_hw *ah); -void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool force); +void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done); void ar9003_mci_check_bt(struct ath_hw *ah); bool ar9003_mci_start_reset(struct ath_hw *ah, struct ath9k_channel *chan); int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan, @@ -275,9 +285,6 @@ int ar9003_mci_end_reset(struct ath_hw *ah, struct ath9k_channel *chan, void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, bool is_full_sleep); void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked); -void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah); -void ar9003_mci_set_power_awake(struct ath_hw *ah); -void ar9003_mci_check_gpm_offset(struct ath_hw *ah); #else @@ -315,15 +322,6 @@ static inline void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g, static inline void ar9003_mci_get_isr(struct ath_hw *ah, enum ath9k_int *masked) { } -static inline void ar9003_mci_bt_gain_ctrl(struct ath_hw *ah) -{ -} -static inline void ar9003_mci_set_power_awake(struct ath_hw *ah) -{ -} -static inline void ar9003_mci_check_gpm_offset(struct ath_hw *ah) -{ -} #endif /* CONFIG_ATH9K_BTCOEX_SUPPORT */ #endif diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/trunk/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h index 8f406ff2c95e..4a93e1534c1d 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h @@ -52,7 +52,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = { {0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020}, {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8}, {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e}, - {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x32395d5e}, + {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x33795d5e}, {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, diff --git a/trunk/drivers/net/wireless/ath/ath9k/ath9k.h b/trunk/drivers/net/wireless/ath/ath9k/ath9k.h index a8c050085648..02fc1c1e5eeb 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ath9k.h @@ -698,7 +698,6 @@ struct ath_softc { #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT struct ath_btcoex btcoex; struct ath_mci_coex mci_coex; - struct work_struct mci_work; #endif struct ath_descdma txsdma; diff --git a/trunk/drivers/net/wireless/ath/ath9k/gpio.c b/trunk/drivers/net/wireless/ath/ath9k/gpio.c index 26032cb59b8a..af6d27350291 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/gpio.c +++ b/trunk/drivers/net/wireless/ath/ath9k/gpio.c @@ -202,7 +202,7 @@ static void ath_btcoex_period_timer(unsigned long data) btcoex->bt_wait_time += btcoex->btcoex_period; if (btcoex->bt_wait_time > ATH_BTCOEX_RX_WAIT_TIME) { - if (ar9003_mci_state(ah, MCI_STATE_NEED_FTP_STOMP) && + if (ar9003_mci_state(ah, MCI_STATE_NEED_FTP_STOMP, NULL) && (mci->num_pan || mci->num_other_acl)) 
ah->btcoex_hw.mci.stomp_ftp = (sc->rx.num_pkts < ATH_BTCOEX_STOMP_FTP_THRESH); @@ -232,7 +232,7 @@ static void ath_btcoex_period_timer(unsigned long data) } ath9k_ps_restore(sc); - timer_period = btcoex->btcoex_period; + timer_period = btcoex->btcoex_period / 1000; mod_timer(&btcoex->period_timer, jiffies + msecs_to_jiffies(timer_period)); } @@ -267,10 +267,10 @@ static int ath_init_btcoex_timer(struct ath_softc *sc) { struct ath_btcoex *btcoex = &sc->btcoex; - btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD; - btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * 1000 * + btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000; + btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * btcoex->btcoex_period / 100; - btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) * 1000 * + btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) * btcoex->btcoex_period / 100; setup_timer(&btcoex->period_timer, ath_btcoex_period_timer, diff --git a/trunk/drivers/net/wireless/ath/ath9k/hw.c b/trunk/drivers/net/wireless/ath/ath9k/hw.c index 784baee5db84..45e670087e1c 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/hw.c +++ b/trunk/drivers/net/wireless/ath/ath9k/hw.c @@ -1348,9 +1348,6 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type) } } - if (ath9k_hw_mci_is_enabled(ah)) - ar9003_mci_check_gpm_offset(ah); - REG_WRITE(ah, AR_RTC_RC, rst_flags); REGWRITE_BUFFER_FLUSH(ah); @@ -1711,7 +1708,7 @@ static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan) ath9k_hw_start_nfcal(ah, true); if (ath9k_hw_mci_is_enabled(ah)) - ar9003_mci_2g5g_switch(ah, false); + ar9003_mci_2g5g_switch(ah, true); if (AR_SREV_9271(ah)) ar9002_hw_load_ani_reg(ah, chan); @@ -1915,8 +1912,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, ath9k_hw_set_dma(ah); - if (!ath9k_hw_mci_is_enabled(ah)) - REG_WRITE(ah, AR_OBS, 8); + REG_WRITE(ah, AR_OBS, 8); if (ah->config.rx_intr_mitigation) { REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500); @@ -2115,9 +2111,6 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah) AR_RTC_FORCE_WAKE_EN); udelay(50); - if (ath9k_hw_mci_is_enabled(ah)) - ar9003_mci_set_power_awake(ah); - for (i = POWER_UP_TIME / 50; i > 0; i--) { val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M; if (val == AR_RTC_STATUS_ON) diff --git a/trunk/drivers/net/wireless/ath/ath9k/link.c b/trunk/drivers/net/wireless/ath/ath9k/link.c index a105c9426251..0cc4c70f7f0c 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/link.c +++ b/trunk/drivers/net/wireless/ath/ath9k/link.c @@ -136,14 +136,6 @@ void ath_hw_pll_work(struct work_struct *work) u32 pll_sqsum; struct ath_softc *sc = container_of(work, struct ath_softc, hw_pll_work.work); - /* - * ensure that the PLL WAR is executed only - * after the STA is associated (or) if the - * beaconing had started in interfaces that - * uses beacons. 
- */ - if (!test_bit(SC_OP_BEACONS, &sc->sc_flags)) - return; ath9k_ps_wakeup(sc); pll_sqsum = ar9003_get_pll_sqsum_dvc(sc->sc_ah); diff --git a/trunk/drivers/net/wireless/ath/ath9k/main.c b/trunk/drivers/net/wireless/ath/ath9k/main.c index 52561b341d68..c0f478b0a9a2 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/main.c +++ b/trunk/drivers/net/wireless/ath/ath9k/main.c @@ -150,9 +150,6 @@ static void __ath_cancel_work(struct ath_softc *sc) cancel_work_sync(&sc->hw_check_work); cancel_delayed_work_sync(&sc->tx_complete_work); cancel_delayed_work_sync(&sc->hw_pll_work); -#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT - cancel_work_sync(&sc->mci_work); -#endif } static void ath_cancel_work(struct ath_softc *sc) @@ -1036,6 +1033,15 @@ static int ath9k_add_interface(struct ieee80211_hw *hw, } } + if ((ah->opmode == NL80211_IFTYPE_ADHOC) || + ((vif->type == NL80211_IFTYPE_ADHOC) && + sc->nvifs > 0)) { + ath_err(common, "Cannot create ADHOC interface when other" + " interfaces already exist.\n"); + ret = -EINVAL; + goto out; + } + ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type); sc->nvifs++; @@ -1060,6 +1066,15 @@ static int ath9k_change_interface(struct ieee80211_hw *hw, mutex_lock(&sc->mutex); ath9k_ps_wakeup(sc); + /* See if new interface type is valid. */ + if ((new_type == NL80211_IFTYPE_ADHOC) && + (sc->nvifs > 1)) { + ath_err(common, "When using ADHOC, it must be the only" + " interface.\n"); + ret = -EINVAL; + goto out; + } + if (ath9k_uses_beacons(new_type) && !ath9k_uses_beacons(vif->type)) { if (sc->nbcnvifs >= ATH_BCBUF) { @@ -1243,7 +1258,6 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed) if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) { ath_err(common, "Unable to set channel\n"); mutex_unlock(&sc->mutex); - ath9k_ps_restore(sc); return -EINVAL; } diff --git a/trunk/drivers/net/wireless/ath/ath9k/mci.c b/trunk/drivers/net/wireless/ath/ath9k/mci.c index 7d34a504d617..49137f477b05 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/mci.c +++ b/trunk/drivers/net/wireless/ath/ath9k/mci.c @@ -20,7 +20,7 @@ #include "ath9k.h" #include "mci.h" -static const u8 ath_mci_duty_cycle[] = { 55, 50, 60, 70, 80, 85, 90, 95, 98 }; +static const u8 ath_mci_duty_cycle[] = { 0, 50, 60, 70, 80, 85, 90, 95, 98 }; static struct ath_mci_profile_info* ath_mci_find_profile(struct ath_mci_profile *mci, @@ -28,14 +28,11 @@ ath_mci_find_profile(struct ath_mci_profile *mci, { struct ath_mci_profile_info *entry; - if (list_empty(&mci->info)) - return NULL; - list_for_each_entry(entry, &mci->info, list) { if (entry->conn_handle == info->conn_handle) - return entry; + break; } - return NULL; + return entry; } static bool ath_mci_add_profile(struct ath_common *common, @@ -52,21 +49,31 @@ static bool ath_mci_add_profile(struct ath_common *common, (info->type != MCI_GPM_COEX_PROFILE_VOICE)) return false; - entry = kzalloc(sizeof(*entry), GFP_ATOMIC); - if (!entry) - return false; + entry = ath_mci_find_profile(mci, info); - memcpy(entry, info, 10); - INC_PROF(mci, info); - list_add_tail(&entry->list, &mci->info); + if (entry) { + memcpy(entry, info, 10); + } else { + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return false; + + memcpy(entry, info, 10); + INC_PROF(mci, info); + list_add_tail(&info->list, &mci->info); + } return true; } static void ath_mci_del_profile(struct ath_common *common, struct ath_mci_profile *mci, - struct ath_mci_profile_info *entry) + struct ath_mci_profile_info *info) { + struct ath_mci_profile_info *entry; + + entry = 
ath_mci_find_profile(mci, info); + if (!entry) return; @@ -79,16 +86,12 @@ void ath_mci_flush_profile(struct ath_mci_profile *mci) { struct ath_mci_profile_info *info, *tinfo; - mci->aggr_limit = 0; - - if (list_empty(&mci->info)) - return; - list_for_each_entry_safe(info, tinfo, &mci->info, list) { list_del(&info->list); DEC_PROF(mci, info); kfree(info); } + mci->aggr_limit = 0; } static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex) @@ -120,8 +123,6 @@ static void ath_mci_update_scheme(struct ath_softc *sc) if (mci_hw->config & ATH_MCI_CONFIG_DISABLE_TUNING) goto skip_tuning; - btcoex->duty_cycle = ath_mci_duty_cycle[num_profile]; - if (num_profile == 1) { info = list_first_entry(&mci->info, struct ath_mci_profile_info, @@ -180,11 +181,12 @@ static void ath_mci_update_scheme(struct ath_softc *sc) if (IS_CHAN_5GHZ(sc->sc_ah->curchan)) return; - btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_BDR_DUTY_CYCLE : 0); + btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_MAX_DUTY_CYCLE : 0); if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE) btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE; - btcoex->btcoex_no_stomp = btcoex->btcoex_period * 1000 * + btcoex->btcoex_period *= 1000; + btcoex->btcoex_no_stomp = btcoex->btcoex_period * (100 - btcoex->duty_cycle) / 100; ath9k_hw_btcoex_enable(sc->sc_ah); @@ -195,16 +197,20 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) { struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci; u32 payload[4] = {0, 0, 0, 0}; switch (opcode) { case MCI_GPM_BT_CAL_REQ: - if (mci_hw->bt_state == MCI_BT_AWAKE) { - ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START); + if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) { + ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START, NULL); ieee80211_queue_work(sc->hw, &sc->hw_reset_work); + } else { + ath_dbg(common, MCI, "MCI State mismatch: %d\n", + ar9003_mci_state(ah, MCI_STATE_BT, NULL)); } - ath_dbg(common, MCI, "MCI State : %d\n", mci_hw->bt_state); + break; + case MCI_GPM_BT_CAL_DONE: + ar9003_mci_state(ah, MCI_STATE_BT, NULL); break; case MCI_GPM_BT_CAL_GRANT: MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE); @@ -217,42 +223,32 @@ static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) } } -static void ath9k_mci_work(struct work_struct *work) -{ - struct ath_softc *sc = container_of(work, struct ath_softc, mci_work); - - ath_mci_update_scheme(sc); -} - static void ath_mci_process_profile(struct ath_softc *sc, struct ath_mci_profile_info *info) { struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_btcoex *btcoex = &sc->btcoex; struct ath_mci_profile *mci = &btcoex->mci; - struct ath_mci_profile_info *entry = NULL; - - entry = ath_mci_find_profile(mci, info); - if (entry) - memcpy(entry, info, 10); if (info->start) { - if (!entry && !ath_mci_add_profile(common, mci, info)) + if (!ath_mci_add_profile(common, mci, info)) return; } else - ath_mci_del_profile(common, mci, entry); + ath_mci_del_profile(common, mci, info); btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD; mci->aggr_limit = mci->num_sco ? 6 : 0; - btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)]; - if (NUM_PROF(mci)) + if (NUM_PROF(mci)) { btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW; - else + btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)]; + } else { btcoex->bt_stomp_type = mci->num_mgmt ? 
ATH_BTCOEX_STOMP_ALL : ATH_BTCOEX_STOMP_LOW; + btcoex->duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE; + } - ieee80211_queue_work(sc->hw, &sc->mci_work); + ath_mci_update_scheme(sc); } static void ath_mci_process_status(struct ath_softc *sc, @@ -267,6 +263,8 @@ static void ath_mci_process_status(struct ath_softc *sc, if (status->is_link) return; + memset(&info, 0, sizeof(struct ath_mci_profile_info)); + info.conn_handle = status->conn_handle; if (ath_mci_find_profile(mci, &info)) return; @@ -286,7 +284,7 @@ static void ath_mci_process_status(struct ath_softc *sc, } while (++i < ATH_MCI_MAX_PROFILE); if (old_num_mgmt != mci->num_mgmt) - ieee80211_queue_work(sc->hw, &sc->mci_work); + ath_mci_update_scheme(sc); } static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) @@ -295,20 +293,25 @@ static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload) struct ath_mci_profile_info profile_info; struct ath_mci_profile_status profile_status; struct ath_common *common = ath9k_hw_common(sc->sc_ah); - u8 major, minor; + u32 version; + u8 major; + u8 minor; u32 seq_num; switch (opcode) { case MCI_GPM_COEX_VERSION_QUERY: - ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION); + version = ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_COEX_VERSION, + NULL); break; case MCI_GPM_COEX_VERSION_RESPONSE: major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION); minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION); - ar9003_mci_set_bt_version(ah, major, minor); + version = (major << 8) + minor; + version = ar9003_mci_state(ah, MCI_STATE_SET_BT_COEX_VERSION, + &version); break; case MCI_GPM_COEX_STATUS_QUERY: - ar9003_mci_send_wlan_channels(ah); + ar9003_mci_state(ah, MCI_STATE_SEND_WLAN_CHANNELS, NULL); break; case MCI_GPM_COEX_BT_PROFILE_INFO: memcpy(&profile_info, @@ -375,7 +378,6 @@ int ath_mci_setup(struct ath_softc *sc) mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4), mci->sched_buf.bf_paddr); - INIT_WORK(&sc->mci_work, ath9k_mci_work); ath_dbg(common, MCI, "MCI Initialized\n"); return 0; @@ -403,7 +405,6 @@ void ath_mci_intr(struct ath_softc *sc) struct ath_mci_coex *mci = &sc->mci_coex; struct ath_hw *ah = sc->sc_ah; struct ath_common *common = ath9k_hw_common(ah); - struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci; u32 mci_int, mci_int_rxmsg; u32 offset, subtype, opcode; u32 *pgpm; @@ -412,8 +413,8 @@ void ath_mci_intr(struct ath_softc *sc) ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg); - if (ar9003_mci_state(ah, MCI_STATE_ENABLE) == 0) { - ar9003_mci_get_next_gpm_offset(ah, true, NULL); + if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) == 0) { + ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL); return; } @@ -432,41 +433,46 @@ void ath_mci_intr(struct ath_softc *sc) NULL, 0, true, false); mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE; - ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE); + ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE, NULL); /* * always do this for recovery and 2G/5G toggling and LNA_TRANS */ - ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE); + ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, NULL); } if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) { mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING; - if ((mci_hw->bt_state == MCI_BT_SLEEP) && - (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) != - MCI_BT_SLEEP)) - ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE); + if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_SLEEP) { + if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) != + MCI_BT_SLEEP) + ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, + 
NULL); + } } if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) { mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING; - if ((mci_hw->bt_state == MCI_BT_AWAKE) && - (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP) != - MCI_BT_AWAKE)) - mci_hw->bt_state = MCI_BT_SLEEP; + if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) { + if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL) != + MCI_BT_AWAKE) + ar9003_mci_state(ah, MCI_STATE_SET_BT_SLEEP, + NULL); + } } if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) || (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) { - ar9003_mci_state(ah, MCI_STATE_RECOVER_RX); + ar9003_mci_state(ah, MCI_STATE_RECOVER_RX, NULL); skip_gpm = true; } if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) { mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO; - offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET); + offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET, + NULL); } if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) { @@ -475,8 +481,8 @@ void ath_mci_intr(struct ath_softc *sc) while (more_data == MCI_GPM_MORE) { pgpm = mci->gpm_buf.bf_addr; - offset = ar9003_mci_get_next_gpm_offset(ah, false, - &more_data); + offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET, + &more_data); if (offset == MCI_GPM_INVALID) break; @@ -517,17 +523,23 @@ void ath_mci_intr(struct ath_softc *sc) mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO; if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) { - int value_dbm = MS(mci_hw->cont_status, - AR_MCI_CONT_RSSI_POWER); + int value_dbm = ar9003_mci_state(ah, + MCI_STATE_CONT_RSSI_POWER, NULL); mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO; - ath_dbg(common, MCI, - "MCI CONT_INFO: (%s) pri = %d pwr = %d dBm\n", - MS(mci_hw->cont_status, AR_MCI_CONT_TXRX) ? - "tx" : "rx", - MS(mci_hw->cont_status, AR_MCI_CONT_PRIORITY), - value_dbm); + if (ar9003_mci_state(ah, MCI_STATE_CONT_TXRX, NULL)) + ath_dbg(common, MCI, + "MCI CONT_INFO: (tx) pri = %d, pwr = %d dBm\n", + ar9003_mci_state(ah, + MCI_STATE_CONT_PRIORITY, NULL), + value_dbm); + else + ath_dbg(common, MCI, + "MCI CONT_INFO: (rx) pri = %d,pwr = %d dBm\n", + ar9003_mci_state(ah, + MCI_STATE_CONT_PRIORITY, NULL), + value_dbm); } if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK) diff --git a/trunk/drivers/net/wireless/ath/ath9k/rc.c b/trunk/drivers/net/wireless/ath/ath9k/rc.c index e034add9cd5a..92a6c0a87f89 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/rc.c +++ b/trunk/drivers/net/wireless/ath/ath9k/rc.c @@ -770,7 +770,7 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, struct ieee80211_tx_rate *rates = tx_info->control.rates; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; __le16 fc = hdr->frame_control; - u8 try_per_rate, i = 0, rix; + u8 try_per_rate, i = 0, rix, high_rix; int is_probe = 0; if (rate_control_send_low(sta, priv_sta, txrc)) @@ -791,6 +791,7 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, rate_table = ath_rc_priv->rate_table; rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe, false); + high_rix = rix; /* * If we're in HT mode and both us and our peer supports LDPC. @@ -838,16 +839,16 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta, try_per_rate = 8; /* - * If the last rate in the rate series is MCS and has - * more than 80% of per thresh, then use a legacy rate - * as last retry to ensure that the frame is tried in both - * MCS and legacy rate. 
+ * Use a legacy rate as last retry to ensure that the frame + * is tried in both MCS and legacy rates. */ - ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix); - if (WLAN_RC_PHY_HT(rate_table->info[rix].phy) && - (ath_rc_priv->per[rix] > 45)) + if ((rates[2].flags & IEEE80211_TX_RC_MCS) && + (!(tx_info->flags & IEEE80211_TX_CTL_AMPDU) || + (ath_rc_priv->per[high_rix] > 45))) rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe, true); + else + ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix); /* All other rates in the series have RTS enabled */ ath_rc_rate_set_series(rate_table, &rates[i], txrc, diff --git a/trunk/drivers/net/wireless/ath/ath9k/reg.h b/trunk/drivers/net/wireless/ath/ath9k/reg.h index 75acefbd4937..560d6effac7a 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/reg.h +++ b/trunk/drivers/net/wireless/ath/ath9k/reg.h @@ -2098,8 +2098,8 @@ enum { #define AR_MCI_CONT_STATUS 0x1848 #define AR_MCI_CONT_RSSI_POWER 0x000000FF #define AR_MCI_CONT_RSSI_POWER_S 0 -#define AR_MCI_CONT_PRIORITY 0x0000FF00 -#define AR_MCI_CONT_PRIORITY_S 8 +#define AR_MCI_CONT_RRIORITY 0x0000FF00 +#define AR_MCI_CONT_RRIORITY_S 8 #define AR_MCI_CONT_TXRX 0x00010000 #define AR_MCI_CONT_TXRX_S 16 diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/trunk/drivers/net/wireless/brcm80211/brcmfmac/Makefile index 9d5170b6df50..abb48032753b 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/Makefile +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/Makefile @@ -34,5 +34,3 @@ brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \ sdio_chip.o brcmfmac-$(CONFIG_BRCMFMAC_USB) += \ usb.o -brcmfmac-$(CONFIG_BRCMDBG) += \ - dhd_dbg.o \ No newline at end of file diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd.h index a11fe54f5950..9f637014486e 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd.h @@ -613,9 +613,6 @@ struct brcmf_pub { struct work_struct multicast_work; u8 macvalue[ETH_ALEN]; atomic_t pend_8021x_cnt; -#ifdef DEBUG - struct dentry *dbgfs_dir; -#endif }; struct brcmf_if_event { diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c deleted file mode 100644 index 7f89540b56da..000000000000 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.c +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Copyright (c) 2012 Broadcom Corporation - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY - * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION - * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN - * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ -#include -#include -#include -#include -#include - -#include -#include -#include -#include "dhd.h" -#include "dhd_bus.h" -#include "dhd_dbg.h" - -static struct dentry *root_folder; - -void brcmf_debugfs_init(void) -{ - root_folder = debugfs_create_dir(KBUILD_MODNAME, NULL); - if (IS_ERR(root_folder)) - root_folder = NULL; -} - -void brcmf_debugfs_exit(void) -{ - if (!root_folder) - return; - - debugfs_remove_recursive(root_folder); - root_folder = NULL; -} - -int brcmf_debugfs_attach(struct brcmf_pub *drvr) -{ - if (!root_folder) - return -ENODEV; - - drvr->dbgfs_dir = debugfs_create_dir(dev_name(drvr->dev), root_folder); - return PTR_RET(drvr->dbgfs_dir); -} - -void brcmf_debugfs_detach(struct brcmf_pub *drvr) -{ - if (!IS_ERR_OR_NULL(drvr->dbgfs_dir)) - debugfs_remove_recursive(drvr->dbgfs_dir); -} - -struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr) -{ - return drvr->dbgfs_dir; -} - -static -ssize_t brcmf_debugfs_sdio_counter_read(struct file *f, char __user *data, - size_t count, loff_t *ppos) -{ - struct brcmf_sdio_count *sdcnt = f->private_data; - char buf[750]; - int res; - - /* only allow read from start */ - if (*ppos > 0) - return 0; - - res = scnprintf(buf, sizeof(buf), - "intrcount: %u\nlastintrs: %u\n" - "pollcnt: %u\nregfails: %u\n" - "tx_sderrs: %u\nfcqueued: %u\n" - "rxrtx: %u\nrx_toolong: %u\n" - "rxc_errors: %u\nrx_hdrfail: %u\n" - "rx_badhdr: %u\nrx_badseq: %u\n" - "fc_rcvd: %u\nfc_xoff: %u\n" - "fc_xon: %u\nrxglomfail: %u\n" - "rxglomframes: %u\nrxglompkts: %u\n" - "f2rxhdrs: %u\nf2rxdata: %u\n" - "f2txdata: %u\nf1regdata: %u\n" - "tickcnt: %u\ntx_ctlerrs: %lu\n" - "tx_ctlpkts: %lu\nrx_ctlerrs: %lu\n" - "rx_ctlpkts: %lu\nrx_readahead: %lu\n", - sdcnt->intrcount, sdcnt->lastintrs, - sdcnt->pollcnt, sdcnt->regfails, - sdcnt->tx_sderrs, sdcnt->fcqueued, - sdcnt->rxrtx, sdcnt->rx_toolong, - sdcnt->rxc_errors, sdcnt->rx_hdrfail, - sdcnt->rx_badhdr, sdcnt->rx_badseq, - sdcnt->fc_rcvd, sdcnt->fc_xoff, - sdcnt->fc_xon, sdcnt->rxglomfail, - sdcnt->rxglomframes, sdcnt->rxglompkts, - sdcnt->f2rxhdrs, sdcnt->f2rxdata, - sdcnt->f2txdata, sdcnt->f1regdata, - sdcnt->tickcnt, sdcnt->tx_ctlerrs, - sdcnt->tx_ctlpkts, sdcnt->rx_ctlerrs, - sdcnt->rx_ctlpkts, sdcnt->rx_readahead_cnt); - - return simple_read_from_buffer(data, count, ppos, buf, res); -} - -static const struct file_operations brcmf_debugfs_sdio_counter_ops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = brcmf_debugfs_sdio_counter_read -}; - -void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr, - struct brcmf_sdio_count *sdcnt) -{ - struct dentry *dentry = drvr->dbgfs_dir; - - if (!IS_ERR_OR_NULL(dentry)) - debugfs_create_file("counters", S_IRUGO, dentry, - sdcnt, &brcmf_debugfs_sdio_counter_ops); -} diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h index b784920532d3..a2c4576cf9ff 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h @@ -76,63 +76,4 @@ do { \ extern int brcmf_msg_level; -/* - * hold counter variables used in brcmfmac sdio driver. 
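The brcmf_debugfs_create_sdio_count() code removed above is the stock debugfs recipe: a per-device directory plus a read-only file whose handler formats a counter structure with scnprintf() and copies it out through simple_read_from_buffer(). A reduced sketch of the same recipe, with illustrative structure and file names:

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>

struct ex_sdio_count {		/* illustrative counter block */
	unsigned int intrcount;
	unsigned int regfails;
};

static ssize_t ex_counter_read(struct file *f, char __user *data,
			       size_t count, loff_t *ppos)
{
	struct ex_sdio_count *c = f->private_data;
	char buf[128];
	int res;

	res = scnprintf(buf, sizeof(buf), "intrcount: %u\nregfails: %u\n",
			c->intrcount, c->regfails);
	return simple_read_from_buffer(data, count, ppos, buf, res);
}

static const struct file_operations ex_counter_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ex_counter_read,
};

static void ex_debugfs_create_counters(struct dentry *devdir,
				       struct ex_sdio_count *c)
{
	if (!IS_ERR_OR_NULL(devdir))
		debugfs_create_file("counters", S_IRUGO, devdir, c,
				    &ex_counter_ops);
}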
- */ -struct brcmf_sdio_count { - uint intrcount; /* Count of device interrupt callbacks */ - uint lastintrs; /* Count as of last watchdog timer */ - uint pollcnt; /* Count of active polls */ - uint regfails; /* Count of R_REG failures */ - uint tx_sderrs; /* Count of tx attempts with sd errors */ - uint fcqueued; /* Tx packets that got queued */ - uint rxrtx; /* Count of rtx requests (NAK to dongle) */ - uint rx_toolong; /* Receive frames too long to receive */ - uint rxc_errors; /* SDIO errors when reading control frames */ - uint rx_hdrfail; /* SDIO errors on header reads */ - uint rx_badhdr; /* Bad received headers (roosync?) */ - uint rx_badseq; /* Mismatched rx sequence number */ - uint fc_rcvd; /* Number of flow-control events received */ - uint fc_xoff; /* Number which turned on flow-control */ - uint fc_xon; /* Number which turned off flow-control */ - uint rxglomfail; /* Failed deglom attempts */ - uint rxglomframes; /* Number of glom frames (superframes) */ - uint rxglompkts; /* Number of packets from glom frames */ - uint f2rxhdrs; /* Number of header reads */ - uint f2rxdata; /* Number of frame data reads */ - uint f2txdata; /* Number of f2 frame writes */ - uint f1regdata; /* Number of f1 register accesses */ - uint tickcnt; /* Number of watchdog been schedule */ - ulong tx_ctlerrs; /* Err of sending ctrl frames */ - ulong tx_ctlpkts; /* Ctrl frames sent to dongle */ - ulong rx_ctlerrs; /* Err of processing rx ctrl frames */ - ulong rx_ctlpkts; /* Ctrl frames processed from dongle */ - ulong rx_readahead_cnt; /* packets where header read-ahead was used */ -}; - -struct brcmf_pub; -#ifdef DEBUG -void brcmf_debugfs_init(void); -void brcmf_debugfs_exit(void); -int brcmf_debugfs_attach(struct brcmf_pub *drvr); -void brcmf_debugfs_detach(struct brcmf_pub *drvr); -struct dentry *brcmf_debugfs_get_devdir(struct brcmf_pub *drvr); -void brcmf_debugfs_create_sdio_count(struct brcmf_pub *drvr, - struct brcmf_sdio_count *sdcnt); -#else -static inline void brcmf_debugfs_init(void) -{ -} -static inline void brcmf_debugfs_exit(void) -{ -} -static inline int brcmf_debugfs_attach(struct brcmf_pub *drvr) -{ - return 0; -} -static inline void brcmf_debugfs_detach(struct brcmf_pub *drvr) -{ -} -#endif - #endif /* _BRCMF_DBG_H_ */ diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index 01cf6c03390b..8933f9b31a9a 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c @@ -1007,9 +1007,6 @@ int brcmf_attach(uint bus_hdrlen, struct device *dev) drvr->bus_if->drvr = drvr; drvr->dev = dev; - /* create device debugfs folder */ - brcmf_debugfs_attach(drvr); - /* Attach and link in the protocol */ ret = brcmf_proto_attach(drvr); if (ret != 0) { @@ -1126,7 +1123,6 @@ void brcmf_detach(struct device *dev) brcmf_proto_detach(drvr); } - brcmf_debugfs_detach(drvr); bus_if->drvr = NULL; kfree(drvr); } @@ -1196,8 +1192,6 @@ int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size) static void brcmf_driver_init(struct work_struct *work) { - brcmf_debugfs_init(); - #ifdef CONFIG_BRCMFMAC_SDIO brcmf_sdio_init(); #endif @@ -1225,7 +1219,6 @@ static void __exit brcmfmac_module_exit(void) #ifdef CONFIG_BRCMFMAC_USB brcmf_usb_exit(); #endif - brcmf_debugfs_exit(); } module_init(brcmfmac_module_init); diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c index 
b023766954a6..4deae28fc211 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c @@ -502,9 +502,12 @@ struct brcmf_sdio { bool intr; /* Use interrupts */ bool poll; /* Use polling */ bool ipend; /* Device interrupt is pending */ + uint intrcount; /* Count of device interrupt callbacks */ + uint lastintrs; /* Count as of last watchdog timer */ uint spurious; /* Count of spurious interrupts */ uint pollrate; /* Ticks between device polls */ uint polltick; /* Tick counter */ + uint pollcnt; /* Count of active polls */ #ifdef DEBUG uint console_interval; @@ -512,6 +515,8 @@ struct brcmf_sdio { uint console_addr; /* Console address from shared struct */ #endif /* DEBUG */ + uint regfails; /* Count of R_REG failures */ + uint clkstate; /* State of sd and backplane clock(s) */ bool activity; /* Activity flag for clock down */ s32 idletime; /* Control for activity timeout */ @@ -526,6 +531,33 @@ struct brcmf_sdio { /* Field to decide if rx of control frames happen in rxbuf or lb-pool */ bool usebufpool; + /* Some additional counters */ + uint tx_sderrs; /* Count of tx attempts with sd errors */ + uint fcqueued; /* Tx packets that got queued */ + uint rxrtx; /* Count of rtx requests (NAK to dongle) */ + uint rx_toolong; /* Receive frames too long to receive */ + uint rxc_errors; /* SDIO errors when reading control frames */ + uint rx_hdrfail; /* SDIO errors on header reads */ + uint rx_badhdr; /* Bad received headers (roosync?) */ + uint rx_badseq; /* Mismatched rx sequence number */ + uint fc_rcvd; /* Number of flow-control events received */ + uint fc_xoff; /* Number which turned on flow-control */ + uint fc_xon; /* Number which turned off flow-control */ + uint rxglomfail; /* Failed deglom attempts */ + uint rxglomframes; /* Number of glom frames (superframes) */ + uint rxglompkts; /* Number of packets from glom frames */ + uint f2rxhdrs; /* Number of header reads */ + uint f2rxdata; /* Number of frame data reads */ + uint f2txdata; /* Number of f2 frame writes */ + uint f1regdata; /* Number of f1 register accesses */ + uint tickcnt; /* Number of watchdog been schedule */ + unsigned long tx_ctlerrs; /* Err of sending ctrl frames */ + unsigned long tx_ctlpkts; /* Ctrl frames sent to dongle */ + unsigned long rx_ctlerrs; /* Err of processing rx ctrl frames */ + unsigned long rx_ctlpkts; /* Ctrl frames processed from dongle */ + unsigned long rx_readahead_cnt; /* Number of packets where header + * read-ahead was used. 
*/ + u8 *ctrl_frame_buf; u32 ctrl_frame_len; bool ctrl_frame_stat; @@ -551,7 +583,6 @@ struct brcmf_sdio { u32 fw_ptr; bool txoff; /* Transmit flow-controlled */ - struct brcmf_sdio_count sdcnt; }; /* clkstate */ @@ -914,7 +945,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus) if (ret == 0) w_sdreg32(bus, SMB_INT_ACK, offsetof(struct sdpcmd_regs, tosbmailbox)); - bus->sdcnt.f1regdata += 2; + bus->f1regdata += 2; /* Dongle recomposed rx frames, accept them again */ if (hmb_data & HMB_DATA_NAKHANDLED) { @@ -953,12 +984,12 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus) HMB_DATA_FCDATA_SHIFT; if (fcbits & ~bus->flowcontrol) - bus->sdcnt.fc_xoff++; + bus->fc_xoff++; if (bus->flowcontrol & ~fcbits) - bus->sdcnt.fc_xon++; + bus->fc_xon++; - bus->sdcnt.fc_rcvd++; + bus->fc_rcvd++; bus->flowcontrol = fcbits; } @@ -990,7 +1021,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx) brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_RF_TERM, &err); - bus->sdcnt.f1regdata++; + bus->f1regdata++; /* Wait until the packet has been flushed (device/FIFO stable) */ for (lastrbc = retries = 0xffff; retries > 0; retries--) { @@ -998,7 +1029,7 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx) SBSDIO_FUNC1_RFRAMEBCHI, &err); lo = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_RFRAMEBCLO, &err); - bus->sdcnt.f1regdata += 2; + bus->f1regdata += 2; if ((hi == 0) && (lo == 0)) break; @@ -1016,11 +1047,11 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx) brcmf_dbg(INFO, "flush took %d iterations\n", 0xffff - retries); if (rtx) { - bus->sdcnt.rxrtx++; + bus->rxrtx++; err = w_sdreg32(bus, SMB_NAK, offsetof(struct sdpcmd_regs, tosbmailbox)); - bus->sdcnt.f1regdata++; + bus->f1regdata++; if (err == 0) bus->rxskip = true; } @@ -1212,7 +1243,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) dlen); errcode = -1; } - bus->sdcnt.f2rxdata++; + bus->f2rxdata++; /* On failure, kill the superframe, allow a couple retries */ if (errcode < 0) { @@ -1225,7 +1256,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) } else { bus->glomerr = 0; brcmf_sdbrcm_rxfail(bus, true, false); - bus->sdcnt.rxglomfail++; + bus->rxglomfail++; brcmf_sdbrcm_free_glom(bus); } return 0; @@ -1281,7 +1312,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) if (rxseq != seq) { brcmf_dbg(INFO, "(superframe) rx_seq %d, expected %d\n", seq, rxseq); - bus->sdcnt.rx_badseq++; + bus->rx_badseq++; rxseq = seq; } @@ -1345,7 +1376,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) } else { bus->glomerr = 0; brcmf_sdbrcm_rxfail(bus, true, false); - bus->sdcnt.rxglomfail++; + bus->rxglomfail++; brcmf_sdbrcm_free_glom(bus); } bus->nextlen = 0; @@ -1371,7 +1402,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) if (rxseq != seq) { brcmf_dbg(GLOM, "rx_seq %d, expected %d\n", seq, rxseq); - bus->sdcnt.rx_badseq++; + bus->rx_badseq++; rxseq = seq; } rxseq++; @@ -1410,8 +1441,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq) down(&bus->sdsem); } - bus->sdcnt.rxglomframes++; - bus->sdcnt.rxglompkts += bus->glom.qlen; + bus->rxglomframes++; + bus->rxglompkts += bus->glom.qlen; } return num; } @@ -1495,7 +1526,7 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff) brcmf_dbg(ERROR, "%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n", len, len - doff, bus->sdiodev->bus_if->maxctl); 
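Several hunks in dhd_sdio.c update the flow-control statistics the same way: the freshly parsed fcbits are compared against the cached bus->flowcontrol, and each direction of change bumps its own counter. The bit logic in isolation, with illustrative names:

#include <linux/types.h>

/* Bits set in 'cur' but not in 'old' switched flow control on (xoff);
 * bits set in 'old' but cleared in 'cur' switched it off (xon). */
static void ex_update_fc_counters(u8 old, u8 cur, uint *fc_xoff,
				  uint *fc_xon, uint *fc_rcvd)
{
	if (old == cur)
		return;

	if (~old & cur)
		(*fc_xoff)++;
	if (old & ~cur)
		(*fc_xon)++;
	(*fc_rcvd)++;
}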
bus->sdiodev->bus_if->dstats.rx_errors++; - bus->sdcnt.rx_toolong++; + bus->rx_toolong++; brcmf_sdbrcm_rxfail(bus, false, false); goto done; } @@ -1505,13 +1536,13 @@ brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff) bus->sdiodev->sbwad, SDIO_FUNC_2, F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen); - bus->sdcnt.f2rxdata++; + bus->f2rxdata++; /* Control frame failures need retransmission */ if (sdret < 0) { brcmf_dbg(ERROR, "read %d control bytes failed: %d\n", rdlen, sdret); - bus->sdcnt.rxc_errors++; + bus->rxc_errors++; brcmf_sdbrcm_rxfail(bus, true, true); goto done; } @@ -1558,7 +1589,7 @@ brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen, /* Read the entire frame */ sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad, SDIO_FUNC_2, F2SYNC, *pkt); - bus->sdcnt.f2rxdata++; + bus->f2rxdata++; if (sdret < 0) { brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n", @@ -1599,7 +1630,7 @@ brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf, if ((u16)~(*len ^ check)) { brcmf_dbg(ERROR, "(nextlen): HW hdr error: nextlen/len/check 0x%04x/0x%04x/0x%04x\n", nextlen, *len, check); - bus->sdcnt.rx_badhdr++; + bus->rx_badhdr++; brcmf_sdbrcm_rxfail(bus, false, false); goto fail; } @@ -1715,7 +1746,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) bus->nextlen = 0; } - bus->sdcnt.rx_readahead_cnt++; + bus->rx_readahead_cnt++; /* Handle Flow Control */ fcbits = SDPCM_FCMASK_VALUE( @@ -1723,12 +1754,12 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) if (bus->flowcontrol != fcbits) { if (~bus->flowcontrol & fcbits) - bus->sdcnt.fc_xoff++; + bus->fc_xoff++; if (bus->flowcontrol & ~fcbits) - bus->sdcnt.fc_xon++; + bus->fc_xon++; - bus->sdcnt.fc_rcvd++; + bus->fc_rcvd++; bus->flowcontrol = fcbits; } @@ -1736,7 +1767,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) if (rxseq != seq) { brcmf_dbg(INFO, "(nextlen): rx_seq %d, expected %d\n", seq, rxseq); - bus->sdcnt.rx_badseq++; + bus->rx_badseq++; rxseq = seq; } @@ -1783,11 +1814,11 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad, SDIO_FUNC_2, F2SYNC, bus->rxhdr, BRCMF_FIRSTREAD); - bus->sdcnt.f2rxhdrs++; + bus->f2rxhdrs++; if (sdret < 0) { brcmf_dbg(ERROR, "RXHEADER FAILED: %d\n", sdret); - bus->sdcnt.rx_hdrfail++; + bus->rx_hdrfail++; brcmf_sdbrcm_rxfail(bus, true, true); continue; } @@ -1809,7 +1840,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) if ((u16) ~(len ^ check)) { brcmf_dbg(ERROR, "HW hdr err: len/check 0x%04x/0x%04x\n", len, check); - bus->sdcnt.rx_badhdr++; + bus->rx_badhdr++; brcmf_sdbrcm_rxfail(bus, false, false); continue; } @@ -1830,7 +1861,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) if ((doff < SDPCM_HDRLEN) || (doff > len)) { brcmf_dbg(ERROR, "Bad data offset %d: HW len %d, min %d seq %d\n", doff, len, SDPCM_HDRLEN, seq); - bus->sdcnt.rx_badhdr++; + bus->rx_badhdr++; brcmf_sdbrcm_rxfail(bus, false, false); continue; } @@ -1849,19 +1880,19 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) if (bus->flowcontrol != fcbits) { if (~bus->flowcontrol & fcbits) - bus->sdcnt.fc_xoff++; + bus->fc_xoff++; if (bus->flowcontrol & ~fcbits) - bus->sdcnt.fc_xon++; + bus->fc_xon++; - bus->sdcnt.fc_rcvd++; + bus->fc_rcvd++; bus->flowcontrol = fcbits; } /* Check and update 
sequence number */ if (rxseq != seq) { brcmf_dbg(INFO, "rx_seq %d, expected %d\n", seq, rxseq); - bus->sdcnt.rx_badseq++; + bus->rx_badseq++; rxseq = seq; } @@ -1906,7 +1937,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) brcmf_dbg(ERROR, "too long: len %d rdlen %d\n", len, rdlen); bus->sdiodev->bus_if->dstats.rx_errors++; - bus->sdcnt.rx_toolong++; + bus->rx_toolong++; brcmf_sdbrcm_rxfail(bus, false, false); continue; } @@ -1929,7 +1960,7 @@ brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished) /* Read the remaining frame data */ sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad, SDIO_FUNC_2, F2SYNC, pkt); - bus->sdcnt.f2rxdata++; + bus->f2rxdata++; if (sdret < 0) { brcmf_dbg(ERROR, "read %d %s bytes failed: %d\n", rdlen, @@ -2116,18 +2147,18 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt, ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad, SDIO_FUNC_2, F2SYNC, pkt); - bus->sdcnt.f2txdata++; + bus->f2txdata++; if (ret < 0) { /* On failure, abort the command and terminate the frame */ brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n", ret); - bus->sdcnt.tx_sderrs++; + bus->tx_sderrs++; brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2); brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL); - bus->sdcnt.f1regdata++; + bus->f1regdata++; for (i = 0; i < 3; i++) { u8 hi, lo; @@ -2135,7 +2166,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt, SBSDIO_FUNC1_WFRAMEBCHI, NULL); lo = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL); - bus->sdcnt.f1regdata += 2; + bus->f1regdata += 2; if ((hi == 0) && (lo == 0)) break; } @@ -2193,7 +2224,7 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes) ret = r_sdreg32(bus, &intstatus, offsetof(struct sdpcmd_regs, intstatus)); - bus->sdcnt.f2txdata++; + bus->f2txdata++; if (ret != 0) break; if (intstatus & bus->hostintmask) @@ -2386,7 +2417,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) bus->ipend = false; err = r_sdreg32(bus, &newstatus, offsetof(struct sdpcmd_regs, intstatus)); - bus->sdcnt.f1regdata++; + bus->f1regdata++; if (err != 0) newstatus = 0; newstatus &= bus->hostintmask; @@ -2395,7 +2426,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) err = w_sdreg32(bus, newstatus, offsetof(struct sdpcmd_regs, intstatus)); - bus->sdcnt.f1regdata++; + bus->f1regdata++; } } @@ -2414,7 +2445,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) err = r_sdreg32(bus, &newstatus, offsetof(struct sdpcmd_regs, intstatus)); - bus->sdcnt.f1regdata += 2; + bus->f1regdata += 2; bus->fcstate = !!(newstatus & (I_HMB_FC_STATE | I_HMB_FC_CHANGE)); intstatus |= (newstatus & bus->hostintmask); @@ -2479,13 +2510,13 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) terminate the frame */ brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n", ret); - bus->sdcnt.tx_sderrs++; + bus->tx_sderrs++; brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2); brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, &err); - bus->sdcnt.f1regdata++; + bus->f1regdata++; for (i = 0; i < 3; i++) { u8 hi, lo; @@ -2495,7 +2526,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus) lo = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, &err); - bus->sdcnt.f1regdata += 2; + bus->f1regdata += 2; if ((hi == 0) && (lo == 0)) break; } @@ -2626,7 +2657,7 @@ static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt) /* 
Check for existing queue, current flow-control, pending event, or pending clock */ brcmf_dbg(TRACE, "deferring pktq len %d\n", pktq_len(&bus->txq)); - bus->sdcnt.fcqueued++; + bus->fcqueued++; /* Priority based enq */ spin_lock_bh(&bus->txqlock); @@ -2814,13 +2845,13 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len) /* On failure, abort the command and terminate the frame */ brcmf_dbg(INFO, "sdio error %d, abort command and terminate frame\n", ret); - bus->sdcnt.tx_sderrs++; + bus->tx_sderrs++; brcmf_sdcard_abort(bus->sdiodev, SDIO_FUNC_2); brcmf_sdio_regwb(bus->sdiodev, SBSDIO_FUNC1_FRAMECTRL, SFC_WF_TERM, NULL); - bus->sdcnt.f1regdata++; + bus->f1regdata++; for (i = 0; i < 3; i++) { u8 hi, lo; @@ -2828,7 +2859,7 @@ static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len) SBSDIO_FUNC1_WFRAMEBCHI, NULL); lo = brcmf_sdio_regrb(bus->sdiodev, SBSDIO_FUNC1_WFRAMEBCLO, NULL); - bus->sdcnt.f1regdata += 2; + bus->f1regdata += 2; if (hi == 0 && lo == 0) break; } @@ -2945,26 +2976,13 @@ brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen) up(&bus->sdsem); if (ret) - bus->sdcnt.tx_ctlerrs++; + bus->tx_ctlerrs++; else - bus->sdcnt.tx_ctlpkts++; + bus->tx_ctlpkts++; return ret ? -EIO : 0; } -#ifdef DEBUG -static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus) -{ - struct brcmf_pub *drvr = bus->sdiodev->bus_if->drvr; - - brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt); -} -#else -static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus) -{ -} -#endif /* DEBUG */ - static int brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen) { @@ -2999,9 +3017,9 @@ brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen) } if (rxlen) - bus->sdcnt.rx_ctlpkts++; + bus->rx_ctlpkts++; else - bus->sdcnt.rx_ctlerrs++; + bus->rx_ctlerrs++; return rxlen ? (int)rxlen : -ETIMEDOUT; } @@ -3401,7 +3419,7 @@ static int brcmf_sdbrcm_bus_init(struct device *dev) return 0; /* Start the watchdog timer */ - bus->sdcnt.tickcnt = 0; + bus->tickcnt = 0; brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS); down(&bus->sdsem); @@ -3494,7 +3512,7 @@ void brcmf_sdbrcm_isr(void *arg) return; } /* Count the interrupt call */ - bus->sdcnt.intrcount++; + bus->intrcount++; bus->ipend = true; /* Shouldn't get this interrupt if we're sleeping? 
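brcmf_sdbrcm_isr() above only increments the interrupt counter and marks ipend; the watchdog in the next hunk notices when that counter has not advanced since its last tick and polls the device itself. A compact sketch of that decision, with assumed field names:

#include <linux/types.h>

struct ex_bus {
	bool intr;		/* interrupts enabled at all? */
	bool dpc_sched;		/* DPC already queued by the ISR? */
	uint intrcount;		/* incremented by the ISR */
	uint lastintrs;		/* snapshot taken on the previous tick */
};

/* Returns true when the watchdog should poll the device itself. */
static bool ex_watchdog_should_poll(struct ex_bus *bus)
{
	bool stalled = !bus->intr || bus->intrcount == bus->lastintrs;

	bus->lastintrs = bus->intrcount;
	return stalled && !bus->dpc_sched;
}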
*/ @@ -3536,8 +3554,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus) bus->polltick = 0; /* Check device if no interrupts */ - if (!bus->intr || - (bus->sdcnt.intrcount == bus->sdcnt.lastintrs)) { + if (!bus->intr || (bus->intrcount == bus->lastintrs)) { if (!bus->dpc_sched) { u8 devpend; @@ -3552,7 +3569,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus) /* If there is something, make like the ISR and schedule the DPC */ if (intstatus) { - bus->sdcnt.pollcnt++; + bus->pollcnt++; bus->ipend = true; bus->dpc_sched = true; @@ -3564,7 +3581,7 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus) } /* Update interrupt tracking */ - bus->sdcnt.lastintrs = bus->sdcnt.intrcount; + bus->lastintrs = bus->intrcount; } #ifdef DEBUG /* Poll for console output periodically */ @@ -3776,7 +3793,7 @@ brcmf_sdbrcm_watchdog_thread(void *data) if (!wait_for_completion_interruptible(&bus->watchdog_wait)) { brcmf_sdbrcm_bus_watchdog(bus); /* Count the tick for reference */ - bus->sdcnt.tickcnt++; + bus->tickcnt++; } else break; } @@ -3817,6 +3834,7 @@ static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus) static void brcmf_sdbrcm_release(struct brcmf_sdio *bus) { brcmf_dbg(TRACE, "Enter\n"); + if (bus) { /* De-register interrupt handler */ brcmf_sdio_intr_unregister(bus->sdiodev); @@ -3920,7 +3938,6 @@ void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev) goto fail; } - brcmf_sdio_debugfs_create(bus); brcmf_dbg(INFO, "completed!!\n"); /* if firmware path present try to download and bring up bus */ diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c index 01b190a25d94..95b5902bc4b3 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c @@ -735,8 +735,10 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi, * a candidate for aggregation */ p = pktq_ppeek(&qi->q, prec); + /* tx_info must be checked with current p */ + tx_info = IEEE80211_SKB_CB(p); + if (p) { - tx_info = IEEE80211_SKB_CB(p); if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && ((u8) (p->priority) == tid)) { plen = p->len + AMPDU_MAX_MPDU_OVERHEAD; @@ -757,7 +759,6 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi, p = NULL; continue; } - /* next packet fit for aggregation so dequeue */ p = brcmu_pktq_pdeq(&qi->q, prec); } else { p = NULL; diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index 341e06a0d6ec..50f92a0b7c41 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -721,6 +721,14 @@ static const struct ieee80211_ops brcms_ops = { .flush = brcms_ops_flush, }; +/* + * is called in brcms_bcma_probe() context, therefore no locking required. 
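The comment above motivates the brcms_set_hint() helper added just below: it runs in probe context, so regulatory_hint() can be called without extra locking, passing the wiphy and the two-letter country code read from SROM. A hedged sketch of such a call site (the helper name and error message are illustrative):

#include <net/cfg80211.h>

/* 'ccode' stands in for the SROM country code (wl->pub->srom_ccode). */
static void ex_apply_srom_country(struct wiphy *wiphy, const char *ccode)
{
	if (ccode[0] && regulatory_hint(wiphy, ccode))
		wiphy_err(wiphy, "regulatory hint for %s failed\n", ccode);
}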
+ */ +static int brcms_set_hint(struct brcms_info *wl, char *abbrev) +{ + return regulatory_hint(wl->pub->ieee_hw->wiphy, abbrev); +} + void brcms_dpc(unsigned long data) { struct brcms_info *wl; @@ -1060,9 +1068,9 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev) wiphy_err(wl->wiphy, "%s: ieee80211_register_hw failed, status" "%d\n", __func__, err); - if (wl->pub->srom_ccode[0] && - regulatory_hint(wl->wiphy, wl->pub->srom_ccode)) - wiphy_err(wl->wiphy, "%s: regulatory hint failed\n", __func__); + if (wl->pub->srom_ccode[0] && brcms_set_hint(wl, wl->pub->srom_ccode)) + wiphy_err(wl->wiphy, "%s: regulatory_hint failed, status %d\n", + __func__, err); n_adapters_found++; return wl; diff --git a/trunk/drivers/net/wireless/iwlwifi/Makefile b/trunk/drivers/net/wireless/iwlwifi/Makefile index 170ec330d2a9..98c8f6449649 100644 --- a/trunk/drivers/net/wireless/iwlwifi/Makefile +++ b/trunk/drivers/net/wireless/iwlwifi/Makefile @@ -1,3 +1,7 @@ +obj-$(CONFIG_IWLDVM) += dvm/ + +CFLAGS_iwl-devtrace.o := -I$(src) + # common obj-$(CONFIG_IWLWIFI) += iwlwifi.o iwlwifi-objs += iwl-io.o @@ -9,11 +13,5 @@ iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o iwlwifi-objs += pcie/1000.o pcie/2000.o pcie/5000.o pcie/6000.o iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o -iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-test.o ccflags-y += -D__CHECK_ENDIAN__ -I$(src) - - -obj-$(CONFIG_IWLDVM) += dvm/ - -CFLAGS_iwl-devtrace.o := -I$(src) diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/agn.h b/trunk/drivers/net/wireless/iwlwifi/dvm/agn.h index 6d102413dd94..2ae3608472a6 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/agn.h +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/agn.h @@ -395,10 +395,8 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags) } extern int iwl_alive_start(struct iwl_priv *priv); - -/* testmode support */ +/* svtool */ #ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE - extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len); extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, @@ -406,16 +404,13 @@ extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct netlink_callback *cb, void *data, int len); extern void iwl_testmode_init(struct iwl_priv *priv); -extern void iwl_testmode_free(struct iwl_priv *priv); - +extern void iwl_testmode_cleanup(struct iwl_priv *priv); #else - static inline int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) { return -ENOSYS; } - static inline int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb, struct netlink_callback *cb, @@ -423,12 +418,12 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb, { return -ENOSYS; } - -static inline void iwl_testmode_init(struct iwl_priv *priv) +static inline +void iwl_testmode_init(struct iwl_priv *priv) { } - -static inline void iwl_testmode_free(struct iwl_priv *priv) +static inline +void iwl_testmode_cleanup(struct iwl_priv *priv) { } #endif diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/dev.h b/trunk/drivers/net/wireless/iwlwifi/dvm/dev.h index 4620b657948a..89f2e1040e7f 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/dev.h +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/dev.h @@ -52,8 +52,6 @@ #include "rs.h" #include "tt.h" -#include "iwl-test.h" - /* CT-KILL constants */ #define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ #define CT_KILL_THRESHOLD 114 /* in Celsius */ @@ -598,6 +596,24 @@ struct iwl_lib_ops { void (*temperature)(struct iwl_priv *priv); }; +#ifdef 
CONFIG_IWLWIFI_DEVICE_TESTMODE +struct iwl_testmode_trace { + u32 buff_size; + u32 total_size; + u32 num_chunks; + u8 *cpu_addr; + u8 *trace_addr; + dma_addr_t dma_addr; + bool trace_enabled; +}; +struct iwl_testmode_mem { + u32 buff_size; + u32 num_chunks; + u8 *buff_addr; + bool read_in_progress; +}; +#endif + struct iwl_wipan_noa_data { struct rcu_head rcu_head; u32 length; @@ -654,6 +670,8 @@ struct iwl_priv { enum ieee80211_band band; u8 valid_contexts; + void (*pre_rx_handler)(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb); int (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, struct iwl_device_cmd *cmd); @@ -877,9 +895,9 @@ struct iwl_priv { struct led_classdev led; unsigned long blink_on, blink_off; bool led_registered; - #ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE - struct iwl_test tst; + struct iwl_testmode_trace testmode_trace; + struct iwl_testmode_mem testmode_mem; u32 tm_fixed_rate; #endif diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/lib.c b/trunk/drivers/net/wireless/iwlwifi/dvm/lib.c index 76f259283c3a..cb1ca7a25dd5 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/lib.c +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/lib.c @@ -1265,7 +1265,7 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd) * the mutex, this ensures we don't try to send two * (or more) synchronous commands at a time. */ - if (!(cmd->flags & CMD_ASYNC)) + if (cmd->flags & CMD_SYNC) lockdep_assert_held(&priv->mutex); if (priv->ucode_owner == IWL_OWNERSHIP_TM && diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/trunk/drivers/net/wireless/iwlwifi/dvm/mac80211.c index 9d2374862314..599e8b41f5a8 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/mac80211.c +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/mac80211.c @@ -476,7 +476,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw) } if (priv->wowlan_sram) - _iwl_read_targ_mem_dwords( + _iwl_read_targ_mem_words( priv->trans, 0x800000, priv->wowlan_sram, img->sec[IWL_UCODE_SECTION_DATA].len / 4); diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/main.c b/trunk/drivers/net/wireless/iwlwifi/dvm/main.c index e620af3d592d..1c2d0233a405 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/main.c +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/main.c @@ -406,7 +406,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv) base = priv->device_pointers.log_event_table; if (iwlagn_hw_valid_rtc_data_addr(base)) { - iwl_read_targ_mem_bytes(priv->trans, base, &read, sizeof(read)); + iwl_read_targ_mem_words(priv->trans, base, &read, sizeof(read)); capacity = read.capacity; mode = read.mode; num_wraps = read.wrap_counter; @@ -1548,7 +1548,7 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode) iwl_dbgfs_unregister(priv); - iwl_testmode_free(priv); + iwl_testmode_cleanup(priv); iwlagn_mac_unregister(priv); iwl_tt_exit(priv); @@ -1671,7 +1671,7 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv) } /*TODO: Update dbgfs with ISR error stats obtained below */ - iwl_read_targ_mem_bytes(trans, base, &table, sizeof(table)); + iwl_read_targ_mem_words(trans, base, &table, sizeof(table)); if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { IWL_ERR(trans, "Start IWL Error Log Dump:\n"); diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/rx.c b/trunk/drivers/net/wireless/iwlwifi/dvm/rx.c index c1f7a18e08dd..0ed90bb8b56a 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/rx.c +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/rx.c @@ -1124,6 +1124,8 @@ int iwl_rx_dispatch(struct 
iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb, { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); + void (*pre_rx_handler)(struct iwl_priv *, + struct iwl_rx_cmd_buffer *); int err = 0; /* @@ -1133,19 +1135,19 @@ int iwl_rx_dispatch(struct iwl_op_mode *op_mode, struct iwl_rx_cmd_buffer *rxb, */ iwl_notification_wait_notify(&priv->notif_wait, pkt); -#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE - /* - * RX data may be forwarded to userspace in one - * of two cases: the user owns the fw through testmode or when - * the user requested to monitor the rx w/o affecting the regular flow. - * In these cases the iwl_test object will handle forwarding the rx - * data to user space. - * Note that if the ownership flag != IWL_OWNERSHIP_TM the flow + /* RX data may be forwarded to userspace (using pre_rx_handler) in one + * of two cases: the first, that the user owns the uCode through + * testmode - in such case the pre_rx_handler is set and no further + * processing takes place. The other case is when the user want to + * monitor the rx w/o affecting the regular flow - the pre_rx_handler + * will be set but the ownership flag != IWL_OWNERSHIP_TM and the flow * continues. + * We need to use ACCESS_ONCE to prevent a case where the handler + * changes between the check and the call. */ - iwl_test_rx(&priv->tst, rxb); -#endif - + pre_rx_handler = ACCESS_ONCE(priv->pre_rx_handler); + if (pre_rx_handler) + pre_rx_handler(priv, rxb); if (priv->ucode_owner != IWL_OWNERSHIP_TM) { /* Based on type of command response or notification, * handle those that need handling via function in diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/testmode.c b/trunk/drivers/net/wireless/iwlwifi/dvm/testmode.c index 57b918ce3b5f..e08b1a383594 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/testmode.c +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/testmode.c @@ -60,7 +60,6 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ - #include #include #include @@ -70,84 +69,355 @@ #include #include #include - #include "iwl-debug.h" +#include "iwl-io.h" #include "iwl-trans.h" +#include "iwl-fh.h" +#include "iwl-prph.h" #include "dev.h" #include "agn.h" -#include "iwl-test.h" -#include "iwl-testmode.h" +#include "testmode.h" + + +/* Periphery registers absolute lower bound. This is used in order to + * differentiate registery access through HBUS_TARG_PRPH_* and + * HBUS_TARG_MEM_* accesses. + */ +#define IWL_TM_ABS_PRPH_START (0xA00000) + +/* The TLVs used in the gnl message policy between the kernel module and + * user space application. iwl_testmode_gnl_msg_policy is to be carried + * through the NL80211_CMD_TESTMODE channel regulated by nl80211. 
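The rx.c hunk above restores the pre_rx_handler indirection, and its comment spells out why the pointer is read with ACCESS_ONCE(): the handler can be cleared concurrently, so the NULL check and the call must observe the same value. The pattern in isolation, with an illustrative private structure:

#include <linux/compiler.h>

struct ex_priv {
	void (*pre_rx_handler)(struct ex_priv *priv, void *rxb);
};

static void ex_rx_dispatch(struct ex_priv *priv, void *rxb)
{
	void (*handler)(struct ex_priv *, void *);

	/* sample the pointer once so the check and the call agree */
	handler = ACCESS_ONCE(priv->pre_rx_handler);
	if (handler)
		handler(priv, rxb);
}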
+ * See testmode.h + */ +static +struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = { + [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, }, + + [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, }, + [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, }, + + [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, }, + [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, }, + [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, }, + + [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, }, + [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, }, -static int iwl_testmode_send_cmd(struct iwl_op_mode *op_mode, - struct iwl_host_cmd *cmd) + [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, }, + + [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, }, + [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, }, + [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, }, + + [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, }, + + [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, }, + + [IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, }, + [IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, }, + [IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, }, + + [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, }, + [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, }, + [IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, }, + [IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, }, + [IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, }, + + [IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, }, +}; + +/* + * See the struct iwl_rx_packet in commands.h for the format of the + * received events from the device + */ +static inline int get_event_length(struct iwl_rx_cmd_buffer *rxb) { - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - return iwl_dvm_send_cmd(priv, cmd); + struct iwl_rx_packet *pkt = rxb_addr(rxb); + if (pkt) + return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + else + return 0; } -static bool iwl_testmode_valid_hw_addr(u32 addr) + +/* + * This function multicasts the spontaneous messages from the device to the + * user space. It is invoked whenever there is a received messages + * from the device. This function is called within the ISR of the rx handlers + * in iwlagn driver. + * + * The parsing of the message content is left to the user space application, + * The message content is treated as unattacked raw data and is encapsulated + * with IWL_TM_ATTR_UCODE_RX_PKT multicasting to the user space. + * + * @priv: the instance of iwlwifi device + * @rxb: pointer to rx data content received by the ISR + * + * See the message policies and TLVs in iwl_testmode_gnl_msg_policy[]. 
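The multicast helper documented in this comment follows the standard cfg80211 testmode event sequence: allocate an event skb with some headroom for the netlink attribute headers, nla_put() the payload, then hand it to cfg80211_testmode_event(). Reduced to a skeleton with placeholder attribute IDs:

#include <linux/skbuff.h>
#include <net/cfg80211.h>
#include <net/netlink.h>

/* Placeholder attribute IDs standing in for the IWL_TM_ATTR_* values. */
enum { EX_ATTR_UNSPEC, EX_ATTR_COMMAND, EX_ATTR_RX_PKT, EX_ATTR_MAX };

static void ex_forward_event(struct wiphy *wiphy, u32 event_id,
			     const void *data, int len)
{
	struct sk_buff *skb;

	/* roughly 20 bytes of headroom for the attribute headers */
	skb = cfg80211_testmode_alloc_event_skb(wiphy, 20 + len, GFP_ATOMIC);
	if (!skb)
		return;

	if (nla_put_u32(skb, EX_ATTR_COMMAND, event_id) ||
	    nla_put(skb, EX_ATTR_RX_PKT, len, data)) {
		kfree_skb(skb);		/* ran out of room, drop the event */
		return;
	}

	cfg80211_testmode_event(skb, GFP_ATOMIC);
}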
+ * For the messages multicasting to the user application, the mandatory + * TLV fields are : + * IWL_TM_ATTR_COMMAND must be IWL_TM_CMD_DEV2APP_UCODE_RX_PKT + * IWL_TM_ATTR_UCODE_RX_PKT for carrying the message content + */ + +static void iwl_testmode_ucode_rx_pkt(struct iwl_priv *priv, + struct iwl_rx_cmd_buffer *rxb) { - if (iwlagn_hw_valid_rtc_data_addr(addr)) - return true; + struct ieee80211_hw *hw = priv->hw; + struct sk_buff *skb; + void *data; + int length; - if (IWLAGN_RTC_INST_LOWER_BOUND <= addr && - addr < IWLAGN_RTC_INST_UPPER_BOUND) - return true; + data = rxb_addr(rxb); + length = get_event_length(rxb); - return false; -} + if (!data || length == 0) + return; -static u32 iwl_testmode_get_fw_ver(struct iwl_op_mode *op_mode) -{ - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - return priv->fw->ucode_ver; + skb = cfg80211_testmode_alloc_event_skb(hw->wiphy, 20 + length, + GFP_ATOMIC); + if (skb == NULL) { + IWL_ERR(priv, + "Run out of memory for messages to user space ?\n"); + return; + } + if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) || + /* the length doesn't include len_n_flags field, so add it manually */ + nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length + sizeof(__le32), data)) + goto nla_put_failure; + cfg80211_testmode_event(skb, GFP_ATOMIC); + return; + +nla_put_failure: + kfree_skb(skb); + IWL_ERR(priv, "Ouch, overran buffer, check allocation!\n"); } -static struct sk_buff* -iwl_testmode_alloc_reply(struct iwl_op_mode *op_mode, int len) +void iwl_testmode_init(struct iwl_priv *priv) { - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - return cfg80211_testmode_alloc_reply_skb(priv->hw->wiphy, len); + priv->pre_rx_handler = NULL; + priv->testmode_trace.trace_enabled = false; + priv->testmode_mem.read_in_progress = false; } -static int iwl_testmode_reply(struct iwl_op_mode *op_mode, struct sk_buff *skb) +static void iwl_mem_cleanup(struct iwl_priv *priv) { - return cfg80211_testmode_reply(skb); + if (priv->testmode_mem.read_in_progress) { + kfree(priv->testmode_mem.buff_addr); + priv->testmode_mem.buff_addr = NULL; + priv->testmode_mem.buff_size = 0; + priv->testmode_mem.num_chunks = 0; + priv->testmode_mem.read_in_progress = false; + } } -static struct sk_buff *iwl_testmode_alloc_event(struct iwl_op_mode *op_mode, - int len) +static void iwl_trace_cleanup(struct iwl_priv *priv) { - struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode); - return cfg80211_testmode_alloc_event_skb(priv->hw->wiphy, len, - GFP_ATOMIC); + if (priv->testmode_trace.trace_enabled) { + if (priv->testmode_trace.cpu_addr && + priv->testmode_trace.dma_addr) + dma_free_coherent(priv->trans->dev, + priv->testmode_trace.total_size, + priv->testmode_trace.cpu_addr, + priv->testmode_trace.dma_addr); + priv->testmode_trace.trace_enabled = false; + priv->testmode_trace.cpu_addr = NULL; + priv->testmode_trace.trace_addr = NULL; + priv->testmode_trace.dma_addr = 0; + priv->testmode_trace.buff_size = 0; + priv->testmode_trace.total_size = 0; + } } -static void iwl_testmode_event(struct iwl_op_mode *op_mode, struct sk_buff *skb) + +void iwl_testmode_cleanup(struct iwl_priv *priv) { - return cfg80211_testmode_event(skb, GFP_ATOMIC); + iwl_trace_cleanup(priv); + iwl_mem_cleanup(priv); } -static struct iwl_test_ops tst_ops = { - .send_cmd = iwl_testmode_send_cmd, - .valid_hw_addr = iwl_testmode_valid_hw_addr, - .get_fw_ver = iwl_testmode_get_fw_ver, - .alloc_reply = iwl_testmode_alloc_reply, - .reply = iwl_testmode_reply, - .alloc_event = iwl_testmode_alloc_event, - 
.event = iwl_testmode_event, -}; -void iwl_testmode_init(struct iwl_priv *priv) +/* + * This function handles the user application commands to the ucode. + * + * It retrieves the mandatory fields IWL_TM_ATTR_UCODE_CMD_ID and + * IWL_TM_ATTR_UCODE_CMD_DATA and calls to the handler to send the + * host command to the ucode. + * + * If any mandatory field is missing, -ENOMSG is replied to the user space + * application; otherwise, waits for the host command to be sent and checks + * the return code. In case or error, it is returned, otherwise a reply is + * allocated and the reply RX packet + * is returned. + * + * @hw: ieee80211_hw object that represents the device + * @tb: gnl message fields from the user space + */ +static int iwl_testmode_ucode(struct ieee80211_hw *hw, struct nlattr **tb) { - iwl_test_init(&priv->tst, priv->trans, &tst_ops); + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + struct iwl_host_cmd cmd; + struct iwl_rx_packet *pkt; + struct sk_buff *skb; + void *reply_buf; + u32 reply_len; + int ret; + bool cmd_want_skb; + + memset(&cmd, 0, sizeof(struct iwl_host_cmd)); + + if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] || + !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) { + IWL_ERR(priv, "Missing ucode command mandatory fields\n"); + return -ENOMSG; + } + + cmd.flags = CMD_ON_DEMAND | CMD_SYNC; + cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]); + if (cmd_want_skb) + cmd.flags |= CMD_WANT_SKB; + + cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]); + cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); + cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); + cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; + IWL_DEBUG_INFO(priv, "testmode ucode command ID 0x%x, flags 0x%x," + " len %d\n", cmd.id, cmd.flags, cmd.len[0]); + + ret = iwl_dvm_send_cmd(priv, &cmd); + if (ret) { + IWL_ERR(priv, "Failed to send hcmd\n"); + return ret; + } + if (!cmd_want_skb) + return ret; + + /* Handling return of SKB to the user */ + pkt = cmd.resp_pkt; + if (!pkt) { + IWL_ERR(priv, "HCMD received a null response packet\n"); + return ret; + } + + reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; + skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, reply_len + 20); + reply_buf = kmalloc(reply_len, GFP_KERNEL); + if (!skb || !reply_buf) { + kfree_skb(skb); + kfree(reply_buf); + return -ENOMEM; + } + + /* The reply is in a page, that we cannot send to user space. */ + memcpy(reply_buf, &(pkt->hdr), reply_len); + iwl_free_resp(&cmd); + + if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) || + nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf)) + goto nla_put_failure; + return cfg80211_testmode_reply(skb); + +nla_put_failure: + IWL_DEBUG_INFO(priv, "Failed creating NL attributes\n"); + return -ENOMSG; } -void iwl_testmode_free(struct iwl_priv *priv) + +/* + * This function handles the user application commands for register access. + * + * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the + * handlers respectively. + * + * If it's an unknown commdn ID, -ENOSYS is returned; or -ENOMSG if the + * mandatory fields(IWL_TM_ATTR_REG_OFFSET,IWL_TM_ATTR_REG_VALUE32, + * IWL_TM_ATTR_REG_VALUE8) are missing; Otherwise 0 is replied indicating + * the success of the command execution. + * + * If IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_REG_READ32, the register read + * value is returned with IWL_TM_ATTR_REG_VALUE32. 
+ * + * @hw: ieee80211_hw object that represents the device + * @tb: gnl message fields from the user space + */ +static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb) { - iwl_test_free(&priv->tst); + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + u32 ofs, val32, cmd; + u8 val8; + struct sk_buff *skb; + int status = 0; + + if (!tb[IWL_TM_ATTR_REG_OFFSET]) { + IWL_ERR(priv, "Missing register offset\n"); + return -ENOMSG; + } + ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]); + IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs); + + /* Allow access only to FH/CSR/HBUS in direct mode. + Since we don't have the upper bounds for the CSR and HBUS segments, + we will use only the upper bound of FH for sanity check. */ + cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]); + if ((cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 || + cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 || + cmd == IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8) && + (ofs >= FH_MEM_UPPER_BOUND)) { + IWL_ERR(priv, "offset out of segment (0x0 - 0x%x)\n", + FH_MEM_UPPER_BOUND); + return -EINVAL; + } + + switch (cmd) { + case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32: + val32 = iwl_read_direct32(priv->trans, ofs); + IWL_INFO(priv, "32bit value to read 0x%x\n", val32); + + skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); + if (!skb) { + IWL_ERR(priv, "Memory allocation fail\n"); + return -ENOMEM; + } + if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32)) + goto nla_put_failure; + status = cfg80211_testmode_reply(skb); + if (status < 0) + IWL_ERR(priv, "Error sending msg : %d\n", status); + break; + case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32: + if (!tb[IWL_TM_ATTR_REG_VALUE32]) { + IWL_ERR(priv, "Missing value to write\n"); + return -ENOMSG; + } else { + val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]); + IWL_INFO(priv, "32bit value to write 0x%x\n", val32); + iwl_write_direct32(priv->trans, ofs, val32); + } + break; + case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8: + if (!tb[IWL_TM_ATTR_REG_VALUE8]) { + IWL_ERR(priv, "Missing value to write\n"); + return -ENOMSG; + } else { + val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]); + IWL_INFO(priv, "8bit value to write 0x%x\n", val8); + iwl_write8(priv->trans, ofs, val8); + } + break; + default: + IWL_ERR(priv, "Unknown testmode register command ID\n"); + return -ENOSYS; + } + + return status; + +nla_put_failure: + kfree_skb(skb); + return -EMSGSIZE; } + static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv) { struct iwl_notification_wait calib_wait; @@ -199,7 +469,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) struct sk_buff *skb; unsigned char *rsp_data_ptr = NULL; int status = 0, rsp_data_len = 0; - u32 inst_size = 0, data_size = 0; + u32 devid, inst_size = 0, data_size = 0; const struct fw_img *img; switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { @@ -293,6 +563,39 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]); break; + case IWL_TM_CMD_APP2DEV_GET_FW_VERSION: + IWL_INFO(priv, "uCode version raw: 0x%x\n", + priv->fw->ucode_ver); + + skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); + if (!skb) { + IWL_ERR(priv, "Memory allocation fail\n"); + return -ENOMEM; + } + if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, + priv->fw->ucode_ver)) + goto nla_put_failure; + status = cfg80211_testmode_reply(skb); + if (status < 0) + IWL_ERR(priv, "Error sending msg : %d\n", status); + break; + + case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: + devid = 
priv->trans->hw_id; + IWL_INFO(priv, "hw version: 0x%x\n", devid); + + skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20); + if (!skb) { + IWL_ERR(priv, "Memory allocation fail\n"); + return -ENOMEM; + } + if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid)) + goto nla_put_failure; + status = cfg80211_testmode_reply(skb); + if (status < 0) + IWL_ERR(priv, "Error sending msg : %d\n", status); + break; + case IWL_TM_CMD_APP2DEV_GET_FW_INFO: skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20 + 8); if (!skb) { @@ -327,6 +630,125 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb) return -EMSGSIZE; } + +/* + * This function handles the user application commands for uCode trace + * + * It retrieves command ID carried with IWL_TM_ATTR_COMMAND and calls to the + * handlers respectively. + * + * If it's an unknown commdn ID, -ENOSYS is replied; otherwise, the returned + * value of the actual command execution is replied to the user application. + * + * @hw: ieee80211_hw object that represents the device + * @tb: gnl message fields from the user space + */ +static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + struct sk_buff *skb; + int status = 0; + struct device *dev = priv->trans->dev; + + switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { + case IWL_TM_CMD_APP2DEV_BEGIN_TRACE: + if (priv->testmode_trace.trace_enabled) + return -EBUSY; + + if (!tb[IWL_TM_ATTR_TRACE_SIZE]) + priv->testmode_trace.buff_size = TRACE_BUFF_SIZE_DEF; + else + priv->testmode_trace.buff_size = + nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]); + if (!priv->testmode_trace.buff_size) + return -EINVAL; + if (priv->testmode_trace.buff_size < TRACE_BUFF_SIZE_MIN || + priv->testmode_trace.buff_size > TRACE_BUFF_SIZE_MAX) + return -EINVAL; + + priv->testmode_trace.total_size = + priv->testmode_trace.buff_size + TRACE_BUFF_PADD; + priv->testmode_trace.cpu_addr = + dma_alloc_coherent(dev, + priv->testmode_trace.total_size, + &priv->testmode_trace.dma_addr, + GFP_KERNEL); + if (!priv->testmode_trace.cpu_addr) + return -ENOMEM; + priv->testmode_trace.trace_enabled = true; + priv->testmode_trace.trace_addr = (u8 *)PTR_ALIGN( + priv->testmode_trace.cpu_addr, 0x100); + memset(priv->testmode_trace.trace_addr, 0x03B, + priv->testmode_trace.buff_size); + skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, + sizeof(priv->testmode_trace.dma_addr) + 20); + if (!skb) { + IWL_ERR(priv, "Memory allocation fail\n"); + iwl_trace_cleanup(priv); + return -ENOMEM; + } + if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR, + sizeof(priv->testmode_trace.dma_addr), + (u64 *)&priv->testmode_trace.dma_addr)) + goto nla_put_failure; + status = cfg80211_testmode_reply(skb); + if (status < 0) { + IWL_ERR(priv, "Error sending msg : %d\n", status); + } + priv->testmode_trace.num_chunks = + DIV_ROUND_UP(priv->testmode_trace.buff_size, + DUMP_CHUNK_SIZE); + break; + + case IWL_TM_CMD_APP2DEV_END_TRACE: + iwl_trace_cleanup(priv); + break; + default: + IWL_ERR(priv, "Unknown testmode mem command ID\n"); + return -ENOSYS; + } + return status; + +nla_put_failure: + kfree_skb(skb); + if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) == + IWL_TM_CMD_APP2DEV_BEGIN_TRACE) + iwl_trace_cleanup(priv); + return -EMSGSIZE; +} + +static int iwl_testmode_trace_dump(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + int idx, length; + + if (priv->testmode_trace.trace_enabled && + priv->testmode_trace.trace_addr) { + 
idx = cb->args[4]; + if (idx >= priv->testmode_trace.num_chunks) + return -ENOENT; + length = DUMP_CHUNK_SIZE; + if (((idx + 1) == priv->testmode_trace.num_chunks) && + (priv->testmode_trace.buff_size % DUMP_CHUNK_SIZE)) + length = priv->testmode_trace.buff_size % + DUMP_CHUNK_SIZE; + + if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length, + priv->testmode_trace.trace_addr + + (DUMP_CHUNK_SIZE * idx))) + goto nla_put_failure; + idx++; + cb->args[4] = idx; + return 0; + } else + return -EFAULT; + + nla_put_failure: + return -ENOBUFS; +} + /* * This function handles the user application switch ucode ownership. * @@ -355,10 +777,10 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb) owner = nla_get_u8(tb[IWL_TM_ATTR_UCODE_OWNER]); if (owner == IWL_OWNERSHIP_DRIVER) { priv->ucode_owner = owner; - iwl_test_enable_notifications(&priv->tst, false); + priv->pre_rx_handler = NULL; } else if (owner == IWL_OWNERSHIP_TM) { + priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt; priv->ucode_owner = owner; - iwl_test_enable_notifications(&priv->tst, true); } else { IWL_ERR(priv, "Invalid owner\n"); return -EINVAL; @@ -366,6 +788,180 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb) return 0; } +static int iwl_testmode_indirect_read(struct iwl_priv *priv, u32 addr, u32 size) +{ + struct iwl_trans *trans = priv->trans; + unsigned long flags; + int i; + + if (size & 0x3) + return -EINVAL; + priv->testmode_mem.buff_size = size; + priv->testmode_mem.buff_addr = + kmalloc(priv->testmode_mem.buff_size, GFP_KERNEL); + if (priv->testmode_mem.buff_addr == NULL) + return -ENOMEM; + + /* Hard-coded periphery absolute address */ + if (IWL_TM_ABS_PRPH_START <= addr && + addr < IWL_TM_ABS_PRPH_START + PRPH_END) { + spin_lock_irqsave(&trans->reg_lock, flags); + iwl_grab_nic_access(trans); + iwl_write32(trans, HBUS_TARG_PRPH_RADDR, + addr | (3 << 24)); + for (i = 0; i < size; i += 4) + *(u32 *)(priv->testmode_mem.buff_addr + i) = + iwl_read32(trans, HBUS_TARG_PRPH_RDAT); + iwl_release_nic_access(trans); + spin_unlock_irqrestore(&trans->reg_lock, flags); + } else { /* target memory (SRAM) */ + _iwl_read_targ_mem_words(trans, addr, + priv->testmode_mem.buff_addr, + priv->testmode_mem.buff_size / 4); + } + + priv->testmode_mem.num_chunks = + DIV_ROUND_UP(priv->testmode_mem.buff_size, DUMP_CHUNK_SIZE); + priv->testmode_mem.read_in_progress = true; + return 0; + +} + +static int iwl_testmode_indirect_write(struct iwl_priv *priv, u32 addr, + u32 size, unsigned char *buf) +{ + struct iwl_trans *trans = priv->trans; + u32 val, i; + unsigned long flags; + + if (IWL_TM_ABS_PRPH_START <= addr && + addr < IWL_TM_ABS_PRPH_START + PRPH_END) { + /* Periphery writes can be 1-3 bytes long, or DWORDs */ + if (size < 4) { + memcpy(&val, buf, size); + spin_lock_irqsave(&trans->reg_lock, flags); + iwl_grab_nic_access(trans); + iwl_write32(trans, HBUS_TARG_PRPH_WADDR, + (addr & 0x0000FFFF) | + ((size - 1) << 24)); + iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val); + iwl_release_nic_access(trans); + /* needed after consecutive writes w/o read */ + mmiowb(); + spin_unlock_irqrestore(&trans->reg_lock, flags); + } else { + if (size % 4) + return -EINVAL; + for (i = 0; i < size; i += 4) + iwl_write_prph(trans, addr+i, + *(u32 *)(buf+i)); + } + } else if (iwlagn_hw_valid_rtc_data_addr(addr) || + (IWLAGN_RTC_INST_LOWER_BOUND <= addr && + addr < IWLAGN_RTC_INST_UPPER_BOUND)) { + _iwl_write_targ_mem_words(trans, addr, buf, size/4); + } else + return -EINVAL; + return 0; +} + +/* + * This function 
handles the user application commands for SRAM data dump + * + * It retrieves the mandatory fields IWL_TM_ATTR_SRAM_ADDR and + * IWL_TM_ATTR_SRAM_SIZE to decide the memory area for SRAM data reading + * + * Several error will be retured, -EBUSY if the SRAM data retrieved by + * previous command has not been delivered to userspace, or -ENOMSG if + * the mandatory fields (IWL_TM_ATTR_SRAM_ADDR,IWL_TM_ATTR_SRAM_SIZE) + * are missing, or -ENOMEM if the buffer allocation fails. + * + * Otherwise 0 is replied indicating the success of the SRAM reading. + * + * @hw: ieee80211_hw object that represents the device + * @tb: gnl message fields from the user space + */ +static int iwl_testmode_indirect_mem(struct ieee80211_hw *hw, + struct nlattr **tb) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + u32 addr, size, cmd; + unsigned char *buf; + + /* Both read and write should be blocked, for atomicity */ + if (priv->testmode_mem.read_in_progress) + return -EBUSY; + + cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]); + if (!tb[IWL_TM_ATTR_MEM_ADDR]) { + IWL_ERR(priv, "Error finding memory offset address\n"); + return -ENOMSG; + } + addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]); + if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) { + IWL_ERR(priv, "Error finding size for memory reading\n"); + return -ENOMSG; + } + size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]); + + if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ) + return iwl_testmode_indirect_read(priv, addr, size); + else { + if (!tb[IWL_TM_ATTR_BUFFER_DUMP]) + return -EINVAL; + buf = (unsigned char *) nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]); + return iwl_testmode_indirect_write(priv, addr, size, buf); + } +} + +static int iwl_testmode_buffer_dump(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct netlink_callback *cb) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + int idx, length; + + if (priv->testmode_mem.read_in_progress) { + idx = cb->args[4]; + if (idx >= priv->testmode_mem.num_chunks) { + iwl_mem_cleanup(priv); + return -ENOENT; + } + length = DUMP_CHUNK_SIZE; + if (((idx + 1) == priv->testmode_mem.num_chunks) && + (priv->testmode_mem.buff_size % DUMP_CHUNK_SIZE)) + length = priv->testmode_mem.buff_size % + DUMP_CHUNK_SIZE; + + if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length, + priv->testmode_mem.buff_addr + + (DUMP_CHUNK_SIZE * idx))) + goto nla_put_failure; + idx++; + cb->args[4] = idx; + return 0; + } else + return -EFAULT; + + nla_put_failure: + return -ENOBUFS; +} + +static int iwl_testmode_notifications(struct ieee80211_hw *hw, + struct nlattr **tb) +{ + struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); + bool enable; + + enable = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]); + if (enable) + priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt; + else + priv->pre_rx_handler = NULL; + return 0; +} + + /* The testmode gnl message handler that takes the gnl message from the * user space and parses it per the policy iwl_testmode_gnl_msg_policy, then * invoke the corresponding handlers. 
@@ -391,27 +987,32 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); int result; - result = iwl_test_parse(&priv->tst, tb, data, len); - if (result) + result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len, + iwl_testmode_gnl_msg_policy); + if (result != 0) { + IWL_ERR(priv, "Error parsing the gnl message : %d\n", result); return result; + } + /* IWL_TM_ATTR_COMMAND is absolutely mandatory */ + if (!tb[IWL_TM_ATTR_COMMAND]) { + IWL_ERR(priv, "Missing testmode command type\n"); + return -ENOMSG; + } /* in case multiple accesses to the device happens */ mutex_lock(&priv->mutex); + switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { case IWL_TM_CMD_APP2DEV_UCODE: + IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n"); + result = iwl_testmode_ucode(hw, tb); + break; case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32: case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32: case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8: - case IWL_TM_CMD_APP2DEV_BEGIN_TRACE: - case IWL_TM_CMD_APP2DEV_END_TRACE: - case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ: - case IWL_TM_CMD_APP2DEV_NOTIFICATIONS: - case IWL_TM_CMD_APP2DEV_GET_FW_VERSION: - case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: - case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE: - result = iwl_test_handle_cmd(&priv->tst, tb); + IWL_DEBUG_INFO(priv, "testmode cmd to register\n"); + result = iwl_testmode_reg(hw, tb); break; - case IWL_TM_CMD_APP2DEV_GET_DEVICENAME: case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW: case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB: @@ -419,25 +1020,45 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) case IWL_TM_CMD_APP2DEV_GET_EEPROM: case IWL_TM_CMD_APP2DEV_FIXRATE_REQ: case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: + case IWL_TM_CMD_APP2DEV_GET_FW_VERSION: + case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: case IWL_TM_CMD_APP2DEV_GET_FW_INFO: IWL_DEBUG_INFO(priv, "testmode cmd to driver\n"); result = iwl_testmode_driver(hw, tb); break; + case IWL_TM_CMD_APP2DEV_BEGIN_TRACE: + case IWL_TM_CMD_APP2DEV_END_TRACE: + case IWL_TM_CMD_APP2DEV_READ_TRACE: + IWL_DEBUG_INFO(priv, "testmode uCode trace cmd to driver\n"); + result = iwl_testmode_trace(hw, tb); + break; + case IWL_TM_CMD_APP2DEV_OWNERSHIP: IWL_DEBUG_INFO(priv, "testmode change uCode ownership\n"); result = iwl_testmode_ownership(hw, tb); break; + case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ: + case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE: + IWL_DEBUG_INFO(priv, "testmode indirect memory cmd " + "to driver\n"); + result = iwl_testmode_indirect_mem(hw, tb); + break; + + case IWL_TM_CMD_APP2DEV_NOTIFICATIONS: + IWL_DEBUG_INFO(priv, "testmode notifications cmd " + "to driver\n"); + result = iwl_testmode_notifications(hw, tb); + break; + default: IWL_ERR(priv, "Unknown testmode command\n"); result = -ENOSYS; break; } - mutex_unlock(&priv->mutex); - if (result) - IWL_ERR(priv, "Test cmd failed result=%d\n", result); + mutex_unlock(&priv->mutex); return result; } @@ -445,6 +1066,7 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb, struct netlink_callback *cb, void *data, int len) { + struct nlattr *tb[IWL_TM_ATTR_MAX]; struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); int result; u32 cmd; @@ -453,19 +1075,39 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb, /* offset by 1 since commands start at 0 */ cmd = cb->args[3] - 1; } else { - struct nlattr *tb[IWL_TM_ATTR_MAX]; - - result = iwl_test_parse(&priv->tst, tb, data, len); - if (result) + result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len, + 
iwl_testmode_gnl_msg_policy); + if (result) { + IWL_ERR(priv, + "Error parsing the gnl message : %d\n", result); return result; + } + /* IWL_TM_ATTR_COMMAND is absolutely mandatory */ + if (!tb[IWL_TM_ATTR_COMMAND]) { + IWL_ERR(priv, "Missing testmode command type\n"); + return -ENOMSG; + } cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]); cb->args[3] = cmd + 1; } /* in case multiple accesses to the device happens */ mutex_lock(&priv->mutex); - result = iwl_test_dump(&priv->tst, cmd, skb, cb); + switch (cmd) { + case IWL_TM_CMD_APP2DEV_READ_TRACE: + IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n"); + result = iwl_testmode_trace_dump(hw, skb, cb); + break; + case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP: + IWL_DEBUG_INFO(priv, "testmode sram dump cmd to driver\n"); + result = iwl_testmode_buffer_dump(hw, skb, cb); + break; + default: + result = -EINVAL; + break; + } + mutex_unlock(&priv->mutex); return result; } diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-testmode.h b/trunk/drivers/net/wireless/iwlwifi/dvm/testmode.h similarity index 100% rename from trunk/drivers/net/wireless/iwlwifi/iwl-testmode.h rename to trunk/drivers/net/wireless/iwlwifi/dvm/testmode.h diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/tx.c b/trunk/drivers/net/wireless/iwlwifi/dvm/tx.c index 5971a23aa47d..0dfaf649b257 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/tx.c +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/tx.c @@ -403,7 +403,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb) info->driver_data[0] = ctx; info->driver_data[1] = dev_cmd; - /* From now on, we cannot access info->control */ spin_lock(&priv->sta_lock); @@ -1183,8 +1182,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb, } /*we can free until ssn % q.n_bd not inclusive */ - WARN_ON_ONCE(iwl_reclaim(priv, sta_id, tid, - txq_id, ssn, &skbs)); + WARN_ON(iwl_reclaim(priv, sta_id, tid, txq_id, ssn, &skbs)); iwlagn_check_ratid_empty(priv, sta_id, tid); freed = 0; diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-drv.c b/trunk/drivers/net/wireless/iwlwifi/iwl-drv.c index a175997e7829..49df0e9d5c5f 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-drv.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-drv.c @@ -131,8 +131,6 @@ struct iwl_drv { #define DVM_OP_MODE 0 #define MVM_OP_MODE 1 -/* Protects the table contents, i.e. 
the ops pointer & drv list */ -static struct mutex iwlwifi_opmode_table_mtx; static struct iwlwifi_opmode_table { const char *name; /* name: iwldvm, iwlmvm, etc */ const struct iwl_op_mode_ops *ops; /* pointer to op_mode ops */ @@ -778,7 +776,6 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) const unsigned int api_min = drv->cfg->ucode_api_min; u32 api_ver; int i; - bool load_module = false; fw->ucode_capa.max_probe_length = 200; fw->ucode_capa.standard_phy_calibration_size = @@ -901,7 +898,6 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) /* We have our copies now, allow OS release its copies */ release_firmware(ucode_raw); - mutex_lock(&iwlwifi_opmode_table_mtx); op = &iwlwifi_opmode_table[DVM_OP_MODE]; /* add this device to the list of devices using this op_mode */ @@ -911,14 +907,11 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) const struct iwl_op_mode_ops *ops = op->ops; drv->op_mode = ops->start(drv->trans, drv->cfg, &drv->fw); - if (!drv->op_mode) { - mutex_unlock(&iwlwifi_opmode_table_mtx); + if (!drv->op_mode) goto out_unbind; - } } else { - load_module = true; + request_module_nowait("%s", op->name); } - mutex_unlock(&iwlwifi_opmode_table_mtx); /* * Complete the firmware request last so that @@ -926,14 +919,6 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context) * are doing the start() above. */ complete(&drv->request_firmware_complete); - - /* - * Load the module last so we don't block anything - * else from proceeding if the module fails to load - * or hangs loading. - */ - if (load_module) - request_module("%s", op->name); return; try_again: @@ -967,7 +952,6 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans, drv->cfg = cfg; init_completion(&drv->request_firmware_complete); - INIT_LIST_HEAD(&drv->list); ret = iwl_request_firmware(drv, true); @@ -990,16 +974,6 @@ void iwl_drv_stop(struct iwl_drv *drv) iwl_dealloc_ucode(drv); - mutex_lock(&iwlwifi_opmode_table_mtx); - /* - * List is empty (this item wasn't added) - * when firmware loading failed -- in that - * case we can't remove it from any list. 
- */ - if (!list_empty(&drv->list)) - list_del(&drv->list); - mutex_unlock(&iwlwifi_opmode_table_mtx); - kfree(drv); } @@ -1022,7 +996,6 @@ int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops) int i; struct iwl_drv *drv; - mutex_lock(&iwlwifi_opmode_table_mtx); for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) { if (strcmp(iwlwifi_opmode_table[i].name, name)) continue; @@ -1030,10 +1003,8 @@ int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops) list_for_each_entry(drv, &iwlwifi_opmode_table[i].drv, list) drv->op_mode = ops->start(drv->trans, drv->cfg, &drv->fw); - mutex_unlock(&iwlwifi_opmode_table_mtx); return 0; } - mutex_unlock(&iwlwifi_opmode_table_mtx); return -EIO; } EXPORT_SYMBOL_GPL(iwl_opmode_register); @@ -1043,7 +1014,6 @@ void iwl_opmode_deregister(const char *name) int i; struct iwl_drv *drv; - mutex_lock(&iwlwifi_opmode_table_mtx); for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) { if (strcmp(iwlwifi_opmode_table[i].name, name)) continue; @@ -1056,10 +1026,8 @@ void iwl_opmode_deregister(const char *name) drv->op_mode = NULL; } } - mutex_unlock(&iwlwifi_opmode_table_mtx); return; } - mutex_unlock(&iwlwifi_opmode_table_mtx); } EXPORT_SYMBOL_GPL(iwl_opmode_deregister); @@ -1067,8 +1035,6 @@ static int __init iwl_drv_init(void) { int i; - mutex_init(&iwlwifi_opmode_table_mtx); - for (i = 0; i < ARRAY_SIZE(iwlwifi_opmode_table); i++) INIT_LIST_HEAD(&iwlwifi_opmode_table[i].drv); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-fh.h b/trunk/drivers/net/wireless/iwlwifi/iwl-fh.h index 806046641747..74bce97a8600 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-fh.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-fh.h @@ -421,8 +421,6 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4) #define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98) -#define FH_TX_TRB_REG(_chan) (FH_MEM_LOWER_BOUND + 0x958 + (_chan) * 4) - /* Instruct FH to increment the retry count of a packet when * it is brought from the memory to TX-FIFO */ diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-io.c b/trunk/drivers/net/wireless/iwlwifi/iwl-io.c index 66c873399aba..5f2df70b73c1 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-io.c +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-io.c @@ -298,8 +298,8 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask) } EXPORT_SYMBOL_GPL(iwl_clear_bits_prph); -void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr, - void *buf, int dwords) +void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr, + void *buf, int words) { unsigned long flags; int offs; @@ -308,26 +308,26 @@ void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr, spin_lock_irqsave(&trans->reg_lock, flags); if (likely(iwl_grab_nic_access(trans))) { iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr); - for (offs = 0; offs < dwords; offs++) + for (offs = 0; offs < words; offs++) vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT); iwl_release_nic_access(trans); } spin_unlock_irqrestore(&trans->reg_lock, flags); } -EXPORT_SYMBOL_GPL(_iwl_read_targ_mem_dwords); +EXPORT_SYMBOL_GPL(_iwl_read_targ_mem_words); u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr) { u32 value; - _iwl_read_targ_mem_dwords(trans, addr, &value, 1); + _iwl_read_targ_mem_words(trans, addr, &value, 1); return value; } EXPORT_SYMBOL_GPL(iwl_read_targ_mem); -int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr, - void *buf, int dwords) +int 
_iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr, + void *buf, int words) { unsigned long flags; int offs, result = 0; @@ -336,7 +336,7 @@ int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr, spin_lock_irqsave(&trans->reg_lock, flags); if (likely(iwl_grab_nic_access(trans))) { iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr); - for (offs = 0; offs < dwords; offs++) + for (offs = 0; offs < words; offs++) iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]); iwl_release_nic_access(trans); } else @@ -345,10 +345,10 @@ int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr, return result; } -EXPORT_SYMBOL_GPL(_iwl_write_targ_mem_dwords); +EXPORT_SYMBOL_GPL(_iwl_write_targ_mem_words); int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val) { - return _iwl_write_targ_mem_dwords(trans, addr, &val, 1); + return _iwl_write_targ_mem_words(trans, addr, &val, 1); } EXPORT_SYMBOL_GPL(iwl_write_targ_mem); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-io.h b/trunk/drivers/net/wireless/iwlwifi/iwl-io.h index 50d3819739d1..4a9a45f771ed 100644 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-io.h +++ b/trunk/drivers/net/wireless/iwlwifi/iwl-io.h @@ -76,18 +76,18 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 reg, u32 bits, u32 mask); void iwl_clear_bits_prph(struct iwl_trans *trans, u32 reg, u32 mask); -void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr, - void *buf, int dwords); +void _iwl_read_targ_mem_words(struct iwl_trans *trans, u32 addr, + void *buf, int words); -#define iwl_read_targ_mem_bytes(trans, addr, buf, bufsize) \ +#define iwl_read_targ_mem_words(trans, addr, buf, bufsize) \ do { \ BUILD_BUG_ON((bufsize) % sizeof(u32)); \ - _iwl_read_targ_mem_dwords(trans, addr, buf, \ - (bufsize) / sizeof(u32));\ + _iwl_read_targ_mem_words(trans, addr, buf, \ + (bufsize) / sizeof(u32));\ } while (0) -int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr, - void *buf, int dwords); +int _iwl_write_targ_mem_words(struct iwl_trans *trans, u32 addr, + void *buf, int words); u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr); int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-test.c b/trunk/drivers/net/wireless/iwlwifi/iwl-test.c deleted file mode 100644 index 81e8c7126d72..000000000000 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-test.c +++ /dev/null @@ -1,856 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. 
- * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#include -#include - -#include "iwl-io.h" -#include "iwl-fh.h" -#include "iwl-prph.h" -#include "iwl-trans.h" -#include "iwl-test.h" -#include "iwl-csr.h" -#include "iwl-testmode.h" - -/* - * Periphery registers absolute lower bound. This is used in order to - * differentiate registery access through HBUS_TARG_PRPH_* and - * HBUS_TARG_MEM_* accesses. - */ -#define IWL_ABS_PRPH_START (0xA00000) - -/* - * The TLVs used in the gnl message policy between the kernel module and - * user space application. iwl_testmode_gnl_msg_policy is to be carried - * through the NL80211_CMD_TESTMODE channel regulated by nl80211. 
- * See iwl-testmode.h - */ -static -struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = { - [IWL_TM_ATTR_COMMAND] = { .type = NLA_U32, }, - - [IWL_TM_ATTR_UCODE_CMD_ID] = { .type = NLA_U8, }, - [IWL_TM_ATTR_UCODE_CMD_DATA] = { .type = NLA_UNSPEC, }, - - [IWL_TM_ATTR_REG_OFFSET] = { .type = NLA_U32, }, - [IWL_TM_ATTR_REG_VALUE8] = { .type = NLA_U8, }, - [IWL_TM_ATTR_REG_VALUE32] = { .type = NLA_U32, }, - - [IWL_TM_ATTR_SYNC_RSP] = { .type = NLA_UNSPEC, }, - [IWL_TM_ATTR_UCODE_RX_PKT] = { .type = NLA_UNSPEC, }, - - [IWL_TM_ATTR_EEPROM] = { .type = NLA_UNSPEC, }, - - [IWL_TM_ATTR_TRACE_ADDR] = { .type = NLA_UNSPEC, }, - [IWL_TM_ATTR_TRACE_DUMP] = { .type = NLA_UNSPEC, }, - [IWL_TM_ATTR_TRACE_SIZE] = { .type = NLA_U32, }, - - [IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, }, - - [IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, }, - - [IWL_TM_ATTR_MEM_ADDR] = { .type = NLA_U32, }, - [IWL_TM_ATTR_BUFFER_SIZE] = { .type = NLA_U32, }, - [IWL_TM_ATTR_BUFFER_DUMP] = { .type = NLA_UNSPEC, }, - - [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, }, - [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, }, - [IWL_TM_ATTR_FW_TYPE] = { .type = NLA_U32, }, - [IWL_TM_ATTR_FW_INST_SIZE] = { .type = NLA_U32, }, - [IWL_TM_ATTR_FW_DATA_SIZE] = { .type = NLA_U32, }, - - [IWL_TM_ATTR_ENABLE_NOTIFICATION] = {.type = NLA_FLAG, }, -}; - -static inline void iwl_test_trace_clear(struct iwl_test *tst) -{ - memset(&tst->trace, 0, sizeof(struct iwl_test_trace)); -} - -static void iwl_test_trace_stop(struct iwl_test *tst) -{ - if (!tst->trace.enabled) - return; - - if (tst->trace.cpu_addr && tst->trace.dma_addr) - dma_free_coherent(tst->trans->dev, - tst->trace.tsize, - tst->trace.cpu_addr, - tst->trace.dma_addr); - - iwl_test_trace_clear(tst); -} - -static inline void iwl_test_mem_clear(struct iwl_test *tst) -{ - memset(&tst->mem, 0, sizeof(struct iwl_test_mem)); -} - -static inline void iwl_test_mem_stop(struct iwl_test *tst) -{ - if (!tst->mem.in_read) - return; - - iwl_test_mem_clear(tst); -} - -/* - * Initializes the test object - * During the lifetime of the test object it is assumed that the transport is - * started. The test object should be stopped before the transport is stopped. 
- */ -void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans, - struct iwl_test_ops *ops) -{ - tst->trans = trans; - tst->ops = ops; - - iwl_test_trace_clear(tst); - iwl_test_mem_clear(tst); -} -EXPORT_SYMBOL_GPL(iwl_test_init); - -/* - * Stop the test object - */ -void iwl_test_free(struct iwl_test *tst) -{ - iwl_test_mem_stop(tst); - iwl_test_trace_stop(tst); -} -EXPORT_SYMBOL_GPL(iwl_test_free); - -static inline int iwl_test_send_cmd(struct iwl_test *tst, - struct iwl_host_cmd *cmd) -{ - return tst->ops->send_cmd(tst->trans->op_mode, cmd); -} - -static inline bool iwl_test_valid_hw_addr(struct iwl_test *tst, u32 addr) -{ - return tst->ops->valid_hw_addr(addr); -} - -static inline u32 iwl_test_fw_ver(struct iwl_test *tst) -{ - return tst->ops->get_fw_ver(tst->trans->op_mode); -} - -static inline struct sk_buff* -iwl_test_alloc_reply(struct iwl_test *tst, int len) -{ - return tst->ops->alloc_reply(tst->trans->op_mode, len); -} - -static inline int iwl_test_reply(struct iwl_test *tst, struct sk_buff *skb) -{ - return tst->ops->reply(tst->trans->op_mode, skb); -} - -static inline struct sk_buff* -iwl_test_alloc_event(struct iwl_test *tst, int len) -{ - return tst->ops->alloc_event(tst->trans->op_mode, len); -} - -static inline void -iwl_test_event(struct iwl_test *tst, struct sk_buff *skb) -{ - return tst->ops->event(tst->trans->op_mode, skb); -} - -/* - * This function handles the user application commands to the fw. The fw - * commands are sent in a synchronuous manner. In case that the user requested - * to get commands response, it is send to the user. - */ -static int iwl_test_fw_cmd(struct iwl_test *tst, struct nlattr **tb) -{ - struct iwl_host_cmd cmd; - struct iwl_rx_packet *pkt; - struct sk_buff *skb; - void *reply_buf; - u32 reply_len; - int ret; - bool cmd_want_skb; - - memset(&cmd, 0, sizeof(struct iwl_host_cmd)); - - if (!tb[IWL_TM_ATTR_UCODE_CMD_ID] || - !tb[IWL_TM_ATTR_UCODE_CMD_DATA]) { - IWL_ERR(tst->trans, "Missing fw command mandatory fields\n"); - return -ENOMSG; - } - - cmd.flags = CMD_ON_DEMAND | CMD_SYNC; - cmd_want_skb = nla_get_flag(tb[IWL_TM_ATTR_UCODE_CMD_SKB]); - if (cmd_want_skb) - cmd.flags |= CMD_WANT_SKB; - - cmd.id = nla_get_u8(tb[IWL_TM_ATTR_UCODE_CMD_ID]); - cmd.data[0] = nla_data(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); - cmd.len[0] = nla_len(tb[IWL_TM_ATTR_UCODE_CMD_DATA]); - cmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY; - IWL_DEBUG_INFO(tst->trans, "test fw cmd=0x%x, flags 0x%x, len %d\n", - cmd.id, cmd.flags, cmd.len[0]); - - ret = iwl_test_send_cmd(tst, &cmd); - if (ret) { - IWL_ERR(tst->trans, "Failed to send hcmd\n"); - return ret; - } - if (!cmd_want_skb) - return ret; - - /* Handling return of SKB to the user */ - pkt = cmd.resp_pkt; - if (!pkt) { - IWL_ERR(tst->trans, "HCMD received a null response packet\n"); - return ret; - } - - reply_len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; - skb = iwl_test_alloc_reply(tst, reply_len + 20); - reply_buf = kmalloc(reply_len, GFP_KERNEL); - if (!skb || !reply_buf) { - kfree_skb(skb); - kfree(reply_buf); - return -ENOMEM; - } - - /* The reply is in a page, that we cannot send to user space. 
*/ - memcpy(reply_buf, &(pkt->hdr), reply_len); - iwl_free_resp(&cmd); - - if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, - IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) || - nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, reply_len, reply_buf)) - goto nla_put_failure; - return iwl_test_reply(tst, skb); - -nla_put_failure: - IWL_DEBUG_INFO(tst->trans, "Failed creating NL attributes\n"); - kfree(reply_buf); - kfree_skb(skb); - return -ENOMSG; -} - -/* - * Handles the user application commands for register access. - */ -static int iwl_test_reg(struct iwl_test *tst, struct nlattr **tb) -{ - u32 ofs, val32, cmd; - u8 val8; - struct sk_buff *skb; - int status = 0; - struct iwl_trans *trans = tst->trans; - - if (!tb[IWL_TM_ATTR_REG_OFFSET]) { - IWL_ERR(trans, "Missing reg offset\n"); - return -ENOMSG; - } - - ofs = nla_get_u32(tb[IWL_TM_ATTR_REG_OFFSET]); - IWL_DEBUG_INFO(trans, "test reg access cmd offset=0x%x\n", ofs); - - cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]); - - /* - * Allow access only to FH/CSR/HBUS in direct mode. - * Since we don't have the upper bounds for the CSR and HBUS segments, - * we will use only the upper bound of FH for sanity check. - */ - if (ofs >= FH_MEM_UPPER_BOUND) { - IWL_ERR(trans, "offset out of segment (0x0 - 0x%x)\n", - FH_MEM_UPPER_BOUND); - return -EINVAL; - } - - switch (cmd) { - case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32: - val32 = iwl_read_direct32(tst->trans, ofs); - IWL_DEBUG_INFO(trans, "32 value to read 0x%x\n", val32); - - skb = iwl_test_alloc_reply(tst, 20); - if (!skb) { - IWL_ERR(trans, "Memory allocation fail\n"); - return -ENOMEM; - } - if (nla_put_u32(skb, IWL_TM_ATTR_REG_VALUE32, val32)) - goto nla_put_failure; - status = iwl_test_reply(tst, skb); - if (status < 0) - IWL_ERR(trans, "Error sending msg : %d\n", status); - break; - - case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32: - if (!tb[IWL_TM_ATTR_REG_VALUE32]) { - IWL_ERR(trans, "Missing value to write\n"); - return -ENOMSG; - } else { - val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]); - IWL_DEBUG_INFO(trans, "32b write val=0x%x\n", val32); - iwl_write_direct32(tst->trans, ofs, val32); - } - break; - - case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8: - if (!tb[IWL_TM_ATTR_REG_VALUE8]) { - IWL_ERR(trans, "Missing value to write\n"); - return -ENOMSG; - } else { - val8 = nla_get_u8(tb[IWL_TM_ATTR_REG_VALUE8]); - IWL_DEBUG_INFO(trans, "8b write val=0x%x\n", val8); - iwl_write8(tst->trans, ofs, val8); - } - break; - - default: - IWL_ERR(trans, "Unknown test register cmd ID\n"); - return -ENOMSG; - } - - return status; - -nla_put_failure: - kfree_skb(skb); - return -EMSGSIZE; -} - -/* - * Handles the request to start FW tracing. Allocates of the trace buffer - * and sends a reply to user space with the address of the allocated buffer. 
- */ -static int iwl_test_trace_begin(struct iwl_test *tst, struct nlattr **tb) -{ - struct sk_buff *skb; - int status = 0; - - if (tst->trace.enabled) - return -EBUSY; - - if (!tb[IWL_TM_ATTR_TRACE_SIZE]) - tst->trace.size = TRACE_BUFF_SIZE_DEF; - else - tst->trace.size = - nla_get_u32(tb[IWL_TM_ATTR_TRACE_SIZE]); - - if (!tst->trace.size) - return -EINVAL; - - if (tst->trace.size < TRACE_BUFF_SIZE_MIN || - tst->trace.size > TRACE_BUFF_SIZE_MAX) - return -EINVAL; - - tst->trace.tsize = tst->trace.size + TRACE_BUFF_PADD; - tst->trace.cpu_addr = dma_alloc_coherent(tst->trans->dev, - tst->trace.tsize, - &tst->trace.dma_addr, - GFP_KERNEL); - if (!tst->trace.cpu_addr) - return -ENOMEM; - - tst->trace.enabled = true; - tst->trace.trace_addr = (u8 *)PTR_ALIGN(tst->trace.cpu_addr, 0x100); - - memset(tst->trace.trace_addr, 0x03B, tst->trace.size); - - skb = iwl_test_alloc_reply(tst, sizeof(tst->trace.dma_addr) + 20); - if (!skb) { - IWL_ERR(tst->trans, "Memory allocation fail\n"); - iwl_test_trace_stop(tst); - return -ENOMEM; - } - - if (nla_put(skb, IWL_TM_ATTR_TRACE_ADDR, - sizeof(tst->trace.dma_addr), - (u64 *)&tst->trace.dma_addr)) - goto nla_put_failure; - - status = iwl_test_reply(tst, skb); - if (status < 0) - IWL_ERR(tst->trans, "Error sending msg : %d\n", status); - - tst->trace.nchunks = DIV_ROUND_UP(tst->trace.size, - DUMP_CHUNK_SIZE); - - return status; - -nla_put_failure: - kfree_skb(skb); - if (nla_get_u32(tb[IWL_TM_ATTR_COMMAND]) == - IWL_TM_CMD_APP2DEV_BEGIN_TRACE) - iwl_test_trace_stop(tst); - return -EMSGSIZE; -} - -/* - * Handles indirect read from the periphery or the SRAM. The read is performed - * to a temporary buffer. The user space application should later issue a dump - */ -static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size) -{ - struct iwl_trans *trans = tst->trans; - unsigned long flags; - int i; - - if (size & 0x3) - return -EINVAL; - - tst->mem.size = size; - tst->mem.addr = kmalloc(tst->mem.size, GFP_KERNEL); - if (tst->mem.addr == NULL) - return -ENOMEM; - - /* Hard-coded periphery absolute address */ - if (IWL_ABS_PRPH_START <= addr && - addr < IWL_ABS_PRPH_START + PRPH_END) { - spin_lock_irqsave(&trans->reg_lock, flags); - iwl_grab_nic_access(trans); - iwl_write32(trans, HBUS_TARG_PRPH_RADDR, - addr | (3 << 24)); - for (i = 0; i < size; i += 4) - *(u32 *)(tst->mem.addr + i) = - iwl_read32(trans, HBUS_TARG_PRPH_RDAT); - iwl_release_nic_access(trans); - spin_unlock_irqrestore(&trans->reg_lock, flags); - } else { /* target memory (SRAM) */ - _iwl_read_targ_mem_dwords(trans, addr, - tst->mem.addr, - tst->mem.size / 4); - } - - tst->mem.nchunks = - DIV_ROUND_UP(tst->mem.size, DUMP_CHUNK_SIZE); - tst->mem.in_read = true; - return 0; - -} - -/* - * Handles indirect write to the periphery or SRAM. The is performed to a - * temporary buffer. 
- */ -static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr, - u32 size, unsigned char *buf) -{ - struct iwl_trans *trans = tst->trans; - u32 val, i; - unsigned long flags; - - if (IWL_ABS_PRPH_START <= addr && - addr < IWL_ABS_PRPH_START + PRPH_END) { - /* Periphery writes can be 1-3 bytes long, or DWORDs */ - if (size < 4) { - memcpy(&val, buf, size); - spin_lock_irqsave(&trans->reg_lock, flags); - iwl_grab_nic_access(trans); - iwl_write32(trans, HBUS_TARG_PRPH_WADDR, - (addr & 0x0000FFFF) | - ((size - 1) << 24)); - iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val); - iwl_release_nic_access(trans); - /* needed after consecutive writes w/o read */ - mmiowb(); - spin_unlock_irqrestore(&trans->reg_lock, flags); - } else { - if (size % 4) - return -EINVAL; - for (i = 0; i < size; i += 4) - iwl_write_prph(trans, addr+i, - *(u32 *)(buf+i)); - } - } else if (iwl_test_valid_hw_addr(tst, addr)) { - _iwl_write_targ_mem_dwords(trans, addr, buf, size / 4); - } else { - return -EINVAL; - } - return 0; -} - -/* - * Handles the user application commands for indirect read/write - * to/from the periphery or the SRAM. - */ -static int iwl_test_indirect_mem(struct iwl_test *tst, struct nlattr **tb) -{ - u32 addr, size, cmd; - unsigned char *buf; - - /* Both read and write should be blocked, for atomicity */ - if (tst->mem.in_read) - return -EBUSY; - - cmd = nla_get_u32(tb[IWL_TM_ATTR_COMMAND]); - if (!tb[IWL_TM_ATTR_MEM_ADDR]) { - IWL_ERR(tst->trans, "Error finding memory offset address\n"); - return -ENOMSG; - } - addr = nla_get_u32(tb[IWL_TM_ATTR_MEM_ADDR]); - if (!tb[IWL_TM_ATTR_BUFFER_SIZE]) { - IWL_ERR(tst->trans, "Error finding size for memory reading\n"); - return -ENOMSG; - } - size = nla_get_u32(tb[IWL_TM_ATTR_BUFFER_SIZE]); - - if (cmd == IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ) { - return iwl_test_indirect_read(tst, addr, size); - } else { - if (!tb[IWL_TM_ATTR_BUFFER_DUMP]) - return -EINVAL; - buf = (unsigned char *)nla_data(tb[IWL_TM_ATTR_BUFFER_DUMP]); - return iwl_test_indirect_write(tst, addr, size, buf); - } -} - -/* - * Enable notifications to user space - */ -static int iwl_test_notifications(struct iwl_test *tst, - struct nlattr **tb) -{ - tst->notify = nla_get_flag(tb[IWL_TM_ATTR_ENABLE_NOTIFICATION]); - return 0; -} - -/* - * Handles the request to get the device id - */ -static int iwl_test_get_dev_id(struct iwl_test *tst, struct nlattr **tb) -{ - u32 devid = tst->trans->hw_id; - struct sk_buff *skb; - int status; - - IWL_DEBUG_INFO(tst->trans, "hw version: 0x%x\n", devid); - - skb = iwl_test_alloc_reply(tst, 20); - if (!skb) { - IWL_ERR(tst->trans, "Memory allocation fail\n"); - return -ENOMEM; - } - - if (nla_put_u32(skb, IWL_TM_ATTR_DEVICE_ID, devid)) - goto nla_put_failure; - status = iwl_test_reply(tst, skb); - if (status < 0) - IWL_ERR(tst->trans, "Error sending msg : %d\n", status); - - return 0; - -nla_put_failure: - kfree_skb(skb); - return -EMSGSIZE; -} - -/* - * Handles the request to get the FW version - */ -static int iwl_test_get_fw_ver(struct iwl_test *tst, struct nlattr **tb) -{ - struct sk_buff *skb; - int status; - u32 ver = iwl_test_fw_ver(tst); - - IWL_DEBUG_INFO(tst->trans, "uCode version raw: 0x%x\n", ver); - - skb = iwl_test_alloc_reply(tst, 20); - if (!skb) { - IWL_ERR(tst->trans, "Memory allocation fail\n"); - return -ENOMEM; - } - - if (nla_put_u32(skb, IWL_TM_ATTR_FW_VERSION, ver)) - goto nla_put_failure; - - status = iwl_test_reply(tst, skb); - if (status < 0) - IWL_ERR(tst->trans, "Error sending msg : %d\n", status); - - return 0; - 
-nla_put_failure: - kfree_skb(skb); - return -EMSGSIZE; -} - -/* - * Parse the netlink message and validate that the IWL_TM_ATTR_CMD exists - */ -int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb, - void *data, int len) -{ - int result; - - result = nla_parse(tb, IWL_TM_ATTR_MAX - 1, data, len, - iwl_testmode_gnl_msg_policy); - if (result) { - IWL_ERR(tst->trans, "Fail parse gnl msg: %d\n", result); - return result; - } - - /* IWL_TM_ATTR_COMMAND is absolutely mandatory */ - if (!tb[IWL_TM_ATTR_COMMAND]) { - IWL_ERR(tst->trans, "Missing testmode command type\n"); - return -ENOMSG; - } - return 0; -} -EXPORT_SYMBOL_GPL(iwl_test_parse); - -/* - * Handle test commands. - * Returns 1 for unknown commands (not handled by the test object); negative - * value in case of error. - */ -int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb) -{ - int result; - - switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) { - case IWL_TM_CMD_APP2DEV_UCODE: - IWL_DEBUG_INFO(tst->trans, "test cmd to uCode\n"); - result = iwl_test_fw_cmd(tst, tb); - break; - - case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32: - case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32: - case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8: - IWL_DEBUG_INFO(tst->trans, "test cmd to register\n"); - result = iwl_test_reg(tst, tb); - break; - - case IWL_TM_CMD_APP2DEV_BEGIN_TRACE: - IWL_DEBUG_INFO(tst->trans, "test uCode trace cmd to driver\n"); - result = iwl_test_trace_begin(tst, tb); - break; - - case IWL_TM_CMD_APP2DEV_END_TRACE: - iwl_test_trace_stop(tst); - result = 0; - break; - - case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_READ: - case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_WRITE: - IWL_DEBUG_INFO(tst->trans, "test indirect memory cmd\n"); - result = iwl_test_indirect_mem(tst, tb); - break; - - case IWL_TM_CMD_APP2DEV_NOTIFICATIONS: - IWL_DEBUG_INFO(tst->trans, "test notifications cmd\n"); - result = iwl_test_notifications(tst, tb); - break; - - case IWL_TM_CMD_APP2DEV_GET_FW_VERSION: - IWL_DEBUG_INFO(tst->trans, "test get FW ver cmd\n"); - result = iwl_test_get_fw_ver(tst, tb); - break; - - case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: - IWL_DEBUG_INFO(tst->trans, "test Get device ID cmd\n"); - result = iwl_test_get_dev_id(tst, tb); - break; - - default: - IWL_DEBUG_INFO(tst->trans, "Unknown test command\n"); - result = 1; - break; - } - return result; -} -EXPORT_SYMBOL_GPL(iwl_test_handle_cmd); - -static int iwl_test_trace_dump(struct iwl_test *tst, struct sk_buff *skb, - struct netlink_callback *cb) -{ - int idx, length; - - if (!tst->trace.enabled || !tst->trace.trace_addr) - return -EFAULT; - - idx = cb->args[4]; - if (idx >= tst->trace.nchunks) - return -ENOENT; - - length = DUMP_CHUNK_SIZE; - if (((idx + 1) == tst->trace.nchunks) && - (tst->trace.size % DUMP_CHUNK_SIZE)) - length = tst->trace.size % - DUMP_CHUNK_SIZE; - - if (nla_put(skb, IWL_TM_ATTR_TRACE_DUMP, length, - tst->trace.trace_addr + (DUMP_CHUNK_SIZE * idx))) - goto nla_put_failure; - - cb->args[4] = ++idx; - return 0; - - nla_put_failure: - return -ENOBUFS; -} - -static int iwl_test_buffer_dump(struct iwl_test *tst, struct sk_buff *skb, - struct netlink_callback *cb) -{ - int idx, length; - - if (!tst->mem.in_read) - return -EFAULT; - - idx = cb->args[4]; - if (idx >= tst->mem.nchunks) { - iwl_test_mem_stop(tst); - return -ENOENT; - } - - length = DUMP_CHUNK_SIZE; - if (((idx + 1) == tst->mem.nchunks) && - (tst->mem.size % DUMP_CHUNK_SIZE)) - length = tst->mem.size % DUMP_CHUNK_SIZE; - - if (nla_put(skb, IWL_TM_ATTR_BUFFER_DUMP, length, - tst->mem.addr + (DUMP_CHUNK_SIZE * idx))) - goto 
nla_put_failure; - - cb->args[4] = ++idx; - return 0; - - nla_put_failure: - return -ENOBUFS; -} - -/* - * Handle dump commands. - * Returns 1 for unknown commands (not handled by the test object); negative - * value in case of error. - */ -int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb, - struct netlink_callback *cb) -{ - int result; - - switch (cmd) { - case IWL_TM_CMD_APP2DEV_READ_TRACE: - IWL_DEBUG_INFO(tst->trans, "uCode trace cmd\n"); - result = iwl_test_trace_dump(tst, skb, cb); - break; - - case IWL_TM_CMD_APP2DEV_INDIRECT_BUFFER_DUMP: - IWL_DEBUG_INFO(tst->trans, "testmode sram dump cmd\n"); - result = iwl_test_buffer_dump(tst, skb, cb); - break; - - default: - result = 1; - break; - } - return result; -} -EXPORT_SYMBOL_GPL(iwl_test_dump); - -/* - * Multicast a spontaneous messages from the device to the user space. - */ -static void iwl_test_send_rx(struct iwl_test *tst, - struct iwl_rx_cmd_buffer *rxb) -{ - struct sk_buff *skb; - struct iwl_rx_packet *data; - int length; - - data = rxb_addr(rxb); - length = le32_to_cpu(data->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; - - /* the length doesn't include len_n_flags field, so add it manually */ - length += sizeof(__le32); - - skb = iwl_test_alloc_event(tst, length + 20); - if (skb == NULL) { - IWL_ERR(tst->trans, "Out of memory for message to user\n"); - return; - } - - if (nla_put_u32(skb, IWL_TM_ATTR_COMMAND, - IWL_TM_CMD_DEV2APP_UCODE_RX_PKT) || - nla_put(skb, IWL_TM_ATTR_UCODE_RX_PKT, length, data)) - goto nla_put_failure; - - iwl_test_event(tst, skb); - return; - -nla_put_failure: - kfree_skb(skb); - IWL_ERR(tst->trans, "Ouch, overran buffer, check allocation!\n"); -} - -/* - * Called whenever a Rx frames is recevied from the device. If notifications to - * the user space are requested, sends the frames to the user. - */ -void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb) -{ - if (tst->notify) - iwl_test_send_rx(tst, rxb); -} -EXPORT_SYMBOL_GPL(iwl_test_rx); diff --git a/trunk/drivers/net/wireless/iwlwifi/iwl-test.h b/trunk/drivers/net/wireless/iwlwifi/iwl-test.h deleted file mode 100644 index e13ffa8acc02..000000000000 --- a/trunk/drivers/net/wireless/iwlwifi/iwl-test.h +++ /dev/null @@ -1,161 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2010 - 2012 Intel Corporation. All rights reserved. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA - * - * The full GNU General Public License is included in this distribution - * in the file called LICENSE.GPL. - * - * Contact Information: - * Intel Linux Wireless - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2010 - 2012 Intel Corporation. 
All rights reserved. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ - -#ifndef __IWL_TEST_H__ -#define __IWL_TEST_H__ - -#include -#include "iwl-trans.h" - -struct iwl_test_trace { - u32 size; - u32 tsize; - u32 nchunks; - u8 *cpu_addr; - u8 *trace_addr; - dma_addr_t dma_addr; - bool enabled; -}; - -struct iwl_test_mem { - u32 size; - u32 nchunks; - u8 *addr; - bool in_read; -}; - -/* - * struct iwl_test_ops: callback to the op mode - * - * The structure defines the callbacks that the op_mode should handle, - * inorder to handle logic that is out of the scope of iwl_test. The - * op_mode must set all the callbacks. - - * @send_cmd: handler that is used by the test object to request the - * op_mode to send a command to the fw. - * - * @valid_hw_addr: handler that is used by the test object to request the - * op_mode to check if the given address is a valid address. - * - * @get_fw_ver: handler used to get the FW version. - * - * @alloc_reply: handler used by the test object to request the op_mode - * to allocate an skb for sending a reply to the user, and initialize - * the skb. It is assumed that the test object only fills the required - * attributes. - * - * @reply: handler used by the test object to request the op_mode to reply - * to a request. The skb is an skb previously allocated by the the - * alloc_reply callback. - I - * @alloc_event: handler used by the test object to request the op_mode - * to allocate an skb for sending an event, and initialize - * the skb. It is assumed that the test object only fills the required - * attributes. - * - * @reply: handler used by the test object to request the op_mode to send - * an event. The skb is an skb previously allocated by the the - * alloc_event callback. 
- */ -struct iwl_test_ops { - int (*send_cmd)(struct iwl_op_mode *op_modes, - struct iwl_host_cmd *cmd); - bool (*valid_hw_addr)(u32 addr); - u32 (*get_fw_ver)(struct iwl_op_mode *op_mode); - - struct sk_buff *(*alloc_reply)(struct iwl_op_mode *op_mode, int len); - int (*reply)(struct iwl_op_mode *op_mode, struct sk_buff *skb); - struct sk_buff* (*alloc_event)(struct iwl_op_mode *op_mode, int len); - void (*event)(struct iwl_op_mode *op_mode, struct sk_buff *skb); -}; - -struct iwl_test { - struct iwl_trans *trans; - struct iwl_test_ops *ops; - struct iwl_test_trace trace; - struct iwl_test_mem mem; - bool notify; -}; - -void iwl_test_init(struct iwl_test *tst, struct iwl_trans *trans, - struct iwl_test_ops *ops); - -void iwl_test_free(struct iwl_test *tst); - -int iwl_test_parse(struct iwl_test *tst, struct nlattr **tb, - void *data, int len); - -int iwl_test_handle_cmd(struct iwl_test *tst, struct nlattr **tb); - -int iwl_test_dump(struct iwl_test *tst, u32 cmd, struct sk_buff *skb, - struct netlink_callback *cb); - -void iwl_test_rx(struct iwl_test *tst, struct iwl_rx_cmd_buffer *rxb); - -static inline void iwl_test_enable_notifications(struct iwl_test *tst, - bool enable) -{ - tst->notify = enable; -} - -#endif diff --git a/trunk/drivers/net/wireless/iwlwifi/pcie/6000.c b/trunk/drivers/net/wireless/iwlwifi/pcie/6000.c index 4a57624afc40..cb08ba03aae7 100644 --- a/trunk/drivers/net/wireless/iwlwifi/pcie/6000.c +++ b/trunk/drivers/net/wireless/iwlwifi/pcie/6000.c @@ -258,7 +258,6 @@ const struct iwl_cfg iwl6030_2bg_cfg = { .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \ .base_params = &iwl6000_g2_base_params, \ .bt_params = &iwl6000_bt_params, \ - .eeprom_params = &iwl6000_eeprom_params, \ .need_temp_offset_calib = true, \ .led_mode = IWL_LED_RF_STATE, \ .adv_pm = true diff --git a/trunk/drivers/net/wireless/iwlwifi/pcie/internal.h b/trunk/drivers/net/wireless/iwlwifi/pcie/internal.h index 5024fb662bf6..94201c4d6227 100644 --- a/trunk/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/trunk/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -339,9 +339,16 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans, void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans, struct iwl_tx_queue *txq, u16 byte_cnt); +void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue); +void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index); +void iwl_trans_tx_queue_set_status(struct iwl_trans *trans, + struct iwl_tx_queue *txq, + int tx_fifo_id, bool active); +void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, + int fifo, int sta_id, int tid, + int frame_limit, u16 ssn); void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, int sta_id, int tid, int frame_limit, u16 ssn); -void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue); void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, enum dma_data_direction dma_dir); int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, diff --git a/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c b/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c index 32ab8ea56135..7461a6a14338 100644 --- a/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -298,10 +298,6 @@ static void iwl_trans_pcie_queue_stuck_timer(unsigned long data) struct iwl_tx_queue *txq = (void *)data; struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); - u32 scd_sram_addr = 
trans_pcie->scd_base_addr + - SCD_TX_STTS_MEM_LOWER_BOUND + (16 * txq->q.id); - u8 buf[16]; - int i; spin_lock(&txq->lock); /* check if triggered erroneously */ @@ -311,40 +307,15 @@ static void iwl_trans_pcie_queue_stuck_timer(unsigned long data) } spin_unlock(&txq->lock); + IWL_ERR(trans, "Queue %d stuck for %u ms.\n", txq->q.id, jiffies_to_msecs(trans_pcie->wd_timeout)); IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n", txq->q.read_ptr, txq->q.write_ptr); - - iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf)); - - iwl_print_hex_error(trans, buf, sizeof(buf)); - - for (i = 0; i < FH_TCSR_CHNL_NUM; i++) - IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", i, - iwl_read_direct32(trans, FH_TX_TRB_REG(i))); - - for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) { - u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(i)); - u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; - bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); - u32 tbl_dw = - iwl_read_targ_mem(trans, - trans_pcie->scd_base_addr + - SCD_TRANS_TBL_OFFSET_QUEUE(i)); - - if (i & 0x1) - tbl_dw = (tbl_dw & 0xFFFF0000) >> 16; - else - tbl_dw = tbl_dw & 0x0000FFFF; - - IWL_ERR(trans, - "Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n", - i, active ? "" : "in", fifo, tbl_dw, - iwl_read_prph(trans, - SCD_QUEUE_RDPTR(i)) & (txq->q.n_bd - 1), - iwl_read_prph(trans, SCD_QUEUE_WRPTR(i))); - } + IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n", + iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq->q.id)) + & (TFD_QUEUE_SIZE_MAX - 1), + iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq->q.id))); iwl_op_mode_nic_error(trans->op_mode); } @@ -1083,21 +1054,23 @@ static void iwl_tx_start(struct iwl_trans *trans) iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, trans_pcie->scd_bc_tbls.dma >> 10); - /* The chain extension of the SCD doesn't work well. This feature is - * enabled by default by the HW, so we need to disable it manually. - */ - iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); - for (i = 0; i < trans_pcie->n_q_to_fifo; i++) { int fifo = trans_pcie->setup_q_to_fifo[i]; - iwl_trans_pcie_txq_enable(trans, i, fifo, IWL_INVALID_STATION, - IWL_TID_NON_QOS, SCD_FRAME_LIMIT, 0); + __iwl_trans_pcie_txq_enable(trans, i, fifo, IWL_INVALID_STATION, + IWL_TID_NON_QOS, + SCD_FRAME_LIMIT, 0); } /* Activate all Tx DMA/FIFO channels */ iwl_trans_txq_set_sched(trans, IWL_MASK(0, 7)); + /* The chain extension of the SCD doesn't work well. This feature is + * enabled by default by the HW, so we need to disable it manually. + */ + iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); + + /* Enable DMA channel */ for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), @@ -1266,19 +1239,6 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, spin_lock(&txq->lock); - /* In AGG mode, the index in the ring must correspond to the WiFi - * sequence number. This is a HW requirements to help the SCD to parse - * the BA. - * Check here that the packets are in the right place on the ring. 
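The comment just above introduces a debug-only consistency check (removed in the lines that follow): in an aggregation queue the driver must place a frame at ring slot SN & 0xff, where SN is its 802.11 sequence number, because the scheduler walks block-ack bitmaps by sequence number. The sequence number sits in the upper 12 bits of the frame's seq_ctrl field; here is a small sketch of the extraction, written as a local helper rather than the driver's own SEQ_TO_SN macro.

#include <linux/ieee80211.h>

/* seq_ctrl layout: [ sequence number : 12 bits | fragment number : 4 bits ],
 * little-endian on the air. IEEE80211_SCTL_SEQ is the 0xfff0 mask. */
static inline u16 example_seq_to_sn(__le16 seq_ctrl)
{
	return (le16_to_cpu(seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
}

/* The WARN_ONCE removed below effectively asserted, for AGG queues:
 *	(example_seq_to_sn(hdr->seq_ctrl) & 0xff) == q->write_ptr
 */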
- */ -#ifdef CONFIG_IWLWIFI_DEBUG - wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); - WARN_ONCE((iwl_read_prph(trans, SCD_AGGR_SEL) & BIT(txq_id)) && - ((wifi_seq & 0xff) != q->write_ptr), - "Q: %d WiFi Seq %d tfdNum %d", - txq_id, wifi_seq, q->write_ptr); -#endif - /* Set up driver data for this TFD */ txq->entries[q->write_ptr].skb = skb; txq->entries[q->write_ptr].cmd = dev_cmd; @@ -1372,8 +1332,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, skb->data + hdr_len, secondlen); /* start timer if queue currently empty */ - if (txq->need_update && q->read_ptr == q->write_ptr && - trans_pcie->wd_timeout) + if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout) mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout); /* Tell device the write index *just past* this latest filled TFD */ diff --git a/trunk/drivers/net/wireless/iwlwifi/pcie/tx.c b/trunk/drivers/net/wireless/iwlwifi/pcie/tx.c index 6baf8deef519..35e82161ca43 100644 --- a/trunk/drivers/net/wireless/iwlwifi/pcie/tx.c +++ b/trunk/drivers/net/wireless/iwlwifi/pcie/tx.c @@ -380,8 +380,8 @@ static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent; } -static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, - u16 txq_id) +static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid, + u16 txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 tbl_dw_addr; @@ -405,7 +405,7 @@ static int iwl_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, return 0; } -static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id) +static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id) { /* Simply stop the queue, but don't change any configuration; * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. 
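The comment above describes a self-masking register: SCD_ACT_EN acts as a write-enable for the ACTIVE bit, so a single register write can stop one queue without a read-modify-write cycle and without disturbing the other fields. A generic illustration of that write-enable idiom follows; the EX_* bit positions are made up and do not correspond to the real SCD layout.

#define EX_POS_ACTIVE	0	/* queue active flag (hypothetical position) */
#define EX_POS_ACT_EN	4	/* write-enable for the ACTIVE field (hypothetical) */

/* ACTIVE = 0 together with ACT_EN = 1 clears ACTIVE in one write; fields
 * whose enable bit stays 0 keep their current hardware value, so nothing
 * else needs to be read back or preserved. */
static inline u32 ex_queue_stop_word(void)
{
	return (0 << EX_POS_ACTIVE) | (1 << EX_POS_ACT_EN);
}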
*/ @@ -415,16 +415,46 @@ static inline void iwl_txq_set_inactive(struct iwl_trans *trans, u16 txq_id) (1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN)); } -void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, - int sta_id, int tid, int frame_limit, u16 ssn) +void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index) +{ + IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d\n", txq_id, index & 0xff); + iwl_write_direct32(trans, HBUS_TARG_WRPTR, + (index & 0xff) | (txq_id << 8)); + iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), index); +} + +void iwl_trans_tx_queue_set_status(struct iwl_trans *trans, + struct iwl_tx_queue *txq, + int tx_fifo_id, bool active) +{ + int txq_id = txq->q.id; + + iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), + (active << SCD_QUEUE_STTS_REG_POS_ACTIVE) | + (tx_fifo_id << SCD_QUEUE_STTS_REG_POS_TXF) | + (1 << SCD_QUEUE_STTS_REG_POS_WSL) | + SCD_QUEUE_STTS_REG_MSK); + + if (active) + IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d\n", + txq_id, tx_fifo_id); + else + IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); +} + +void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, + int fifo, int sta_id, int tid, + int frame_limit, u16 ssn) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + lockdep_assert_held(&trans_pcie->irq_lock); + if (test_and_set_bit(txq_id, trans_pcie->queue_used)) WARN_ONCE(1, "queue %d already used - expect issues", txq_id); /* Stop this Tx queue before configuring it */ - iwl_txq_set_inactive(trans, txq_id); + iwlagn_tx_queue_stop_scheduler(trans, txq_id); /* Set this queue as a chain-building queue unless it is CMD queue */ if (txq_id != trans_pcie->cmd_queue) @@ -435,27 +465,17 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, u16 ra_tid = BUILD_RAxTID(sta_id, tid); /* Map receiver-address / traffic-ID to this queue */ - iwl_txq_set_ratid_map(trans, ra_tid, txq_id); + iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id); /* enable aggregations for the queue */ iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); - } else { - /* - * disable aggregations for the queue, this will also make the - * ra_tid mapping configuration irrelevant since it is now a - * non-AGG queue. - */ - iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); } /* Place first TFD at index corresponding to start sequence number. 
* Assumes that ssn_idx is valid (!= 0xFFF) */ trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff); trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff); - - iwl_write_direct32(trans, HBUS_TARG_WRPTR, - (ssn & 0xff) | (txq_id << 8)); - iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); + iwl_trans_set_wr_ptrs(trans, txq_id, ssn); /* Set up Tx window size and frame limit for this queue */ iwl_write_targ_mem(trans, trans_pcie->scd_base_addr + @@ -468,34 +488,43 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK)); /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */ - iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), - (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | - (fifo << SCD_QUEUE_STTS_REG_POS_TXF) | - (1 << SCD_QUEUE_STTS_REG_POS_WSL) | - SCD_QUEUE_STTS_REG_MSK); - IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d on FIFO %d WrPtr: %d\n", - txq_id, fifo, ssn & 0xff); + iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], + fifo, true); +} + +void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo, + int sta_id, int tid, int frame_limit, u16 ssn) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + unsigned long flags; + + spin_lock_irqsave(&trans_pcie->irq_lock, flags); + + __iwl_trans_pcie_txq_enable(trans, txq_id, fifo, sta_id, + tid, frame_limit, ssn); + + spin_unlock_irqrestore(&trans_pcie->irq_lock, flags); } void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - u16 rd_ptr, wr_ptr; - int n_bd = trans_pcie->txq[txq_id].q.n_bd; if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) { WARN_ONCE(1, "queue %d not used", txq_id); return; } - rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1); - wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)); + iwlagn_tx_queue_stop_scheduler(trans, txq_id); + + iwl_clear_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id)); - WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]", - txq_id, rd_ptr, wr_ptr); + trans_pcie->txq[txq_id].q.read_ptr = 0; + trans_pcie->txq[txq_id].q.write_ptr = 0; + iwl_trans_set_wr_ptrs(trans, txq_id, 0); - iwl_txq_set_inactive(trans, txq_id); - IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); + iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], + 0, false); } /*************** HOST COMMAND QUEUE FUNCTIONS *****/ diff --git a/trunk/drivers/net/wireless/mwifiex/uap_cmd.c b/trunk/drivers/net/wireless/mwifiex/uap_cmd.c index 89f9a2a45de3..8173ab66066d 100644 --- a/trunk/drivers/net/wireless/mwifiex/uap_cmd.c +++ b/trunk/drivers/net/wireless/mwifiex/uap_cmd.c @@ -27,17 +27,6 @@ int mwifiex_set_secure_params(struct mwifiex_private *priv, struct cfg80211_ap_settings *params) { int i; - if (!params->privacy) { - bss_config->protocol = PROTOCOL_NO_SECURITY; - bss_config->key_mgmt = KEY_MGMT_NONE; - bss_config->wpa_cfg.length = 0; - priv->sec_info.wep_enabled = 0; - priv->sec_info.wpa_enabled = 0; - priv->sec_info.wpa2_enabled = 0; - - return 0; - } - switch (params->auth_type) { case NL80211_AUTHTYPE_OPEN_SYSTEM: bss_config->auth_mode = WLAN_AUTH_OPEN; diff --git a/trunk/drivers/net/wireless/rndis_wlan.c b/trunk/drivers/net/wireless/rndis_wlan.c index dfcd02ab6cae..2e9e6af21362 100644 --- a/trunk/drivers/net/wireless/rndis_wlan.c +++ b/trunk/drivers/net/wireless/rndis_wlan.c @@ -2110,7 +2110,7 @@ static int rndis_check_bssid_list(struct usbnet *usbdev, u8 *match_bssid, while 
(check_bssid_list_item(bssid, bssid_len, buf, len)) { if (rndis_bss_info_update(usbdev, bssid) && match_bssid && matched) { - if (ether_addr_equal(bssid->mac, match_bssid)) + if (!ether_addr_equal(bssid->mac, match_bssid)) *matched = true; } diff --git a/trunk/include/net/bluetooth/a2mp.h b/trunk/include/net/bluetooth/a2mp.h deleted file mode 100644 index 6a76e0a0705e..000000000000 --- a/trunk/include/net/bluetooth/a2mp.h +++ /dev/null @@ -1,126 +0,0 @@ -/* - Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved. - Copyright (c) 2011,2012 Intel Corp. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License version 2 and - only version 2 as published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. -*/ - -#ifndef __A2MP_H -#define __A2MP_H - -#include - -#define A2MP_FEAT_EXT 0x8000 - -struct amp_mgr { - struct l2cap_conn *l2cap_conn; - struct l2cap_chan *a2mp_chan; - struct kref kref; - __u8 ident; - __u8 handle; - unsigned long flags; -}; - -struct a2mp_cmd { - __u8 code; - __u8 ident; - __le16 len; - __u8 data[0]; -} __packed; - -/* A2MP command codes */ -#define A2MP_COMMAND_REJ 0x01 -struct a2mp_cmd_rej { - __le16 reason; - __u8 data[0]; -} __packed; - -#define A2MP_DISCOVER_REQ 0x02 -struct a2mp_discov_req { - __le16 mtu; - __le16 ext_feat; -} __packed; - -struct a2mp_cl { - __u8 id; - __u8 type; - __u8 status; -} __packed; - -#define A2MP_DISCOVER_RSP 0x03 -struct a2mp_discov_rsp { - __le16 mtu; - __le16 ext_feat; - struct a2mp_cl cl[0]; -} __packed; - -#define A2MP_CHANGE_NOTIFY 0x04 -#define A2MP_CHANGE_RSP 0x05 - -#define A2MP_GETINFO_REQ 0x06 -struct a2mp_info_req { - __u8 id; -} __packed; - -#define A2MP_GETINFO_RSP 0x07 -struct a2mp_info_rsp { - __u8 id; - __u8 status; - __le32 total_bw; - __le32 max_bw; - __le32 min_latency; - __le16 pal_cap; - __le16 assoc_size; -} __packed; - -#define A2MP_GETAMPASSOC_REQ 0x08 -struct a2mp_amp_assoc_req { - __u8 id; -} __packed; - -#define A2MP_GETAMPASSOC_RSP 0x09 -struct a2mp_amp_assoc_rsp { - __u8 id; - __u8 status; - __u8 amp_assoc[0]; -} __packed; - -#define A2MP_CREATEPHYSLINK_REQ 0x0A -#define A2MP_DISCONNPHYSLINK_REQ 0x0C -struct a2mp_physlink_req { - __u8 local_id; - __u8 remote_id; - __u8 amp_assoc[0]; -} __packed; - -#define A2MP_CREATEPHYSLINK_RSP 0x0B -#define A2MP_DISCONNPHYSLINK_RSP 0x0D -struct a2mp_physlink_rsp { - __u8 local_id; - __u8 remote_id; - __u8 status; -} __packed; - -/* A2MP response status */ -#define A2MP_STATUS_SUCCESS 0x00 -#define A2MP_STATUS_INVALID_CTRL_ID 0x01 -#define A2MP_STATUS_UNABLE_START_LINK_CREATION 0x02 -#define A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS 0x02 -#define A2MP_STATUS_COLLISION_OCCURED 0x03 -#define A2MP_STATUS_DISCONN_REQ_RECVD 0x04 -#define A2MP_STATUS_PHYS_LINK_EXISTS 0x05 -#define A2MP_STATUS_SECURITY_VIOLATION 0x06 - -void amp_mgr_get(struct amp_mgr *mgr); -int amp_mgr_put(struct amp_mgr *mgr); -struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn, - struct sk_buff *skb); - -#endif /* __A2MP_H */ diff --git a/trunk/include/net/bluetooth/bluetooth.h b/trunk/include/net/bluetooth/bluetooth.h index 565d4bee1e49..961669b648fd 100644 --- a/trunk/include/net/bluetooth/bluetooth.h +++ b/trunk/include/net/bluetooth/bluetooth.h @@ -1,4 +1,4 @@ -/* +/* BlueZ - Bluetooth protocol stack 
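The A2MP response structures above use the zero-length-array idiom (cl[0], amp_assoc[0], frag[0]) so that one __packed header can be followed by a variable number of fixed-size records. Below is a sketch of sizing and filling a discover response that carries num_ctrl controller-list entries; example_build_discov_rsp() is a hypothetical helper, the field values are placeholders, and L2CAP_A2MP_DEFAULT_MTU is the constant removed from l2cap.h later in this patch.

#include <linux/slab.h>

static struct a2mp_discov_rsp *example_build_discov_rsp(u8 num_ctrl)
{
	/* sizeof(struct a2mp_discov_rsp) covers only mtu + ext_feat (4 bytes,
	 * the flexible cl[] member adds nothing), so the controller list is
	 * accounted for explicitly. */
	size_t len = sizeof(struct a2mp_discov_rsp) +
		     num_ctrl * sizeof(struct a2mp_cl);
	struct a2mp_discov_rsp *rsp = kzalloc(len, GFP_KERNEL);
	int i;

	if (!rsp)
		return NULL;

	rsp->mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
	rsp->ext_feat = 0;

	for (i = 0; i < num_ctrl; i++) {
		rsp->cl[i].id = i;	/* placeholder controller ids */
		rsp->cl[i].type = 0;	/* placeholder type value */
		rsp->cl[i].status = 1;	/* placeholder status value */
	}

	return rsp;	/* caller sends these len bytes and then kfree()s it */
}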
for Linux Copyright (C) 2000-2001 Qualcomm Incorporated @@ -12,19 +12,22 @@ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY - CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, - COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS SOFTWARE IS DISCLAIMED. */ #ifndef __BLUETOOTH_H #define __BLUETOOTH_H +#include +#include +#include #include #include @@ -165,8 +168,8 @@ typedef struct { #define BDADDR_LE_PUBLIC 0x01 #define BDADDR_LE_RANDOM 0x02 -#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0} }) -#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff} }) +#define BDADDR_ANY (&(bdaddr_t) {{0, 0, 0, 0, 0, 0}}) +#define BDADDR_LOCAL (&(bdaddr_t) {{0, 0, 0, 0xff, 0xff, 0xff}}) /* Copy, swap, convert BD Address */ static inline int bacmp(bdaddr_t *ba1, bdaddr_t *ba2) @@ -212,7 +215,7 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags); int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags); -uint bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait); +uint bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait); int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo); @@ -222,12 +225,12 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock); /* Skb helpers */ struct l2cap_ctrl { - unsigned int sframe:1, - poll:1, - final:1, - fcs:1, - sar:2, - super:2; + unsigned int sframe : 1, + poll : 1, + final : 1, + fcs : 1, + sar : 2, + super : 2; __u16 reqseq; __u16 txseq; __u8 retries; @@ -246,8 +249,7 @@ static inline struct sk_buff *bt_skb_alloc(unsigned int len, gfp_t how) { struct sk_buff *skb; - skb = alloc_skb(len + BT_SKB_RESERVE, how); - if (skb) { + if ((skb = alloc_skb(len + BT_SKB_RESERVE, how))) { skb_reserve(skb, BT_SKB_RESERVE); bt_cb(skb)->incoming = 0; } @@ -259,8 +261,7 @@ static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk, { struct sk_buff *skb; - skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err); - if (skb) { + if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) { skb_reserve(skb, BT_SKB_RESERVE); bt_cb(skb)->incoming = 0; } diff --git a/trunk/include/net/bluetooth/hci.h b/trunk/include/net/bluetooth/hci.h index 2a6b0b8b7120..66a7b579e31c 100644 --- a/trunk/include/net/bluetooth/hci.h +++ b/trunk/include/net/bluetooth/hci.h @@ -30,9 +30,6 @@ #define HCI_MAX_EVENT_SIZE 260 #define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4) -#define HCI_LINK_KEY_SIZE 16 -#define HCI_AMP_LINK_KEY_SIZE (2 * HCI_LINK_KEY_SIZE) - /* HCI dev events */ #define HCI_DEV_REG 1 
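The bt_skb_alloc()/bt_skb_send_alloc() helpers earlier in this hunk always over-allocate by BT_SKB_RESERVE and immediately skb_reserve() that amount: upper layers then place their payload at the data pointer, while the HCI core can later skb_push() its own ACL or SCO header into the reserved headroom without reallocating. A minimal sketch of a sender relying on that; example_build_frame() and its payload are illustrative.

#include <net/bluetooth/bluetooth.h>

static struct sk_buff *example_build_frame(const void *payload, size_t len)
{
	struct sk_buff *skb = bt_skb_alloc(len, GFP_KERNEL);

	if (!skb)
		return NULL;

	/* payload goes at the current data pointer ... */
	memcpy(skb_put(skb, len), payload, len);

	/* ... while BT_SKB_RESERVE bytes stay free in front, e.g. for a
	 * later skb_push(skb, HCI_ACL_HDR_SIZE) by the HCI core. */
	return skb;
}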
#define HCI_DEV_UNREG 2 @@ -59,12 +56,9 @@ #define HCI_BREDR 0x00 #define HCI_AMP 0x01 -/* First BR/EDR Controller shall have ID = 0 */ -#define HCI_BREDR_ID 0 - /* HCI device quirks */ enum { - HCI_QUIRK_RESET_ON_CLOSE, + HCI_QUIRK_NO_RESET, HCI_QUIRK_RAW_DEVICE, HCI_QUIRK_FIXUP_BUFFER_SIZE }; @@ -139,8 +133,10 @@ enum { #define HCIINQUIRY _IOR('H', 240, int) /* HCI timeouts */ +#define HCI_CONNECT_TIMEOUT (40000) /* 40 seconds */ #define HCI_DISCONN_TIMEOUT (2000) /* 2 seconds */ #define HCI_PAIRING_TIMEOUT (60000) /* 60 seconds */ +#define HCI_IDLE_TIMEOUT (6000) /* 6 seconds */ #define HCI_INIT_TIMEOUT (10000) /* 10 seconds */ #define HCI_CMD_TIMEOUT (1000) /* 1 seconds */ #define HCI_ACL_TX_TIMEOUT (45000) /* 45 seconds */ @@ -375,7 +371,7 @@ struct hci_cp_reject_conn_req { #define HCI_OP_LINK_KEY_REPLY 0x040b struct hci_cp_link_key_reply { bdaddr_t bdaddr; - __u8 link_key[HCI_LINK_KEY_SIZE]; + __u8 link_key[16]; } __packed; #define HCI_OP_LINK_KEY_NEG_REPLY 0x040c @@ -527,28 +523,6 @@ struct hci_cp_io_capability_neg_reply { __u8 reason; } __packed; -#define HCI_OP_CREATE_PHY_LINK 0x0435 -struct hci_cp_create_phy_link { - __u8 phy_handle; - __u8 key_len; - __u8 key_type; - __u8 key[HCI_AMP_LINK_KEY_SIZE]; -} __packed; - -#define HCI_OP_ACCEPT_PHY_LINK 0x0436 -struct hci_cp_accept_phy_link { - __u8 phy_handle; - __u8 key_len; - __u8 key_type; - __u8 key[HCI_AMP_LINK_KEY_SIZE]; -} __packed; - -#define HCI_OP_DISCONN_PHY_LINK 0x0437 -struct hci_cp_disconn_phy_link { - __u8 phy_handle; - __u8 reason; -} __packed; - #define HCI_OP_SNIFF_MODE 0x0803 struct hci_cp_sniff_mode { __le16 handle; @@ -844,31 +818,6 @@ struct hci_rp_read_local_amp_info { __le32 be_flush_to; } __packed; -#define HCI_OP_READ_LOCAL_AMP_ASSOC 0x140a -struct hci_cp_read_local_amp_assoc { - __u8 phy_handle; - __le16 len_so_far; - __le16 max_len; -} __packed; -struct hci_rp_read_local_amp_assoc { - __u8 status; - __u8 phy_handle; - __le16 rem_len; - __u8 frag[0]; -} __packed; - -#define HCI_OP_WRITE_REMOTE_AMP_ASSOC 0x140b -struct hci_cp_write_remote_amp_assoc { - __u8 phy_handle; - __le16 len_so_far; - __le16 rem_len; - __u8 frag[0]; -} __packed; -struct hci_rp_write_remote_amp_assoc { - __u8 status; - __u8 phy_handle; -} __packed; - #define HCI_OP_LE_SET_EVENT_MASK 0x2001 struct hci_cp_le_set_event_mask { __u8 mask[8]; @@ -1099,7 +1048,7 @@ struct hci_ev_link_key_req { #define HCI_EV_LINK_KEY_NOTIFY 0x18 struct hci_ev_link_key_notify { bdaddr_t bdaddr; - __u8 link_key[HCI_LINK_KEY_SIZE]; + __u8 link_key[16]; __u8 key_type; } __packed; @@ -1195,12 +1144,6 @@ struct extended_inquiry_info { __u8 data[240]; } __packed; -#define HCI_EV_KEY_REFRESH_COMPLETE 0x30 -struct hci_ev_key_refresh_complete { - __u8 status; - __le16 handle; -} __packed; - #define HCI_EV_IO_CAPA_REQUEST 0x31 struct hci_ev_io_capa_request { bdaddr_t bdaddr; @@ -1247,39 +1190,6 @@ struct hci_ev_le_meta { __u8 subevent; } __packed; -#define HCI_EV_PHY_LINK_COMPLETE 0x40 -struct hci_ev_phy_link_complete { - __u8 status; - __u8 phy_handle; -} __packed; - -#define HCI_EV_CHANNEL_SELECTED 0x41 -struct hci_ev_channel_selected { - __u8 phy_handle; -} __packed; - -#define HCI_EV_DISCONN_PHY_LINK_COMPLETE 0x42 -struct hci_ev_disconn_phy_link_complete { - __u8 status; - __u8 phy_handle; - __u8 reason; -} __packed; - -#define HCI_EV_LOGICAL_LINK_COMPLETE 0x45 -struct hci_ev_logical_link_complete { - __u8 status; - __le16 handle; - __u8 phy_handle; - __u8 flow_spec_id; -} __packed; - -#define HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE 0x46 -struct 
hci_ev_disconn_logical_link_complete { - __u8 status; - __le16 handle; - __u8 reason; -} __packed; - #define HCI_EV_NUM_COMP_BLOCKS 0x48 struct hci_comp_blocks_info { __le16 handle; @@ -1380,6 +1290,7 @@ struct hci_sco_hdr { __u8 dlen; } __packed; +#include static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb) { return (struct hci_event_hdr *) skb->data; @@ -1396,12 +1307,12 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb) } /* Command opcode pack/unpack */ -#define hci_opcode_pack(ogf, ocf) ((__u16) ((ocf & 0x03ff)|(ogf << 10))) +#define hci_opcode_pack(ogf, ocf) (__u16) ((ocf & 0x03ff)|(ogf << 10)) #define hci_opcode_ogf(op) (op >> 10) #define hci_opcode_ocf(op) (op & 0x03ff) /* ACL handle and flags pack/unpack */ -#define hci_handle_pack(h, f) ((__u16) ((h & 0x0fff)|(f << 12))) +#define hci_handle_pack(h, f) (__u16) ((h & 0x0fff)|(f << 12)) #define hci_handle(h) (h & 0x0fff) #define hci_flags(h) (h >> 12) diff --git a/trunk/include/net/bluetooth/hci_core.h b/trunk/include/net/bluetooth/hci_core.h index 20fd57367ddc..9fc7728f94e4 100644 --- a/trunk/include/net/bluetooth/hci_core.h +++ b/trunk/include/net/bluetooth/hci_core.h @@ -25,6 +25,7 @@ #ifndef __HCI_CORE_H #define __HCI_CORE_H +#include #include /* HCI priority */ @@ -64,7 +65,7 @@ struct discovery_state { DISCOVERY_RESOLVING, DISCOVERY_STOPPING, } state; - struct list_head all; /* All devices found during inquiry */ + struct list_head all; /* All devices found during inquiry */ struct list_head unknown; /* Name state not known */ struct list_head resolve; /* Name needs to be resolved */ __u32 timestamp; @@ -104,7 +105,7 @@ struct link_key { struct list_head list; bdaddr_t bdaddr; u8 type; - u8 val[HCI_LINK_KEY_SIZE]; + u8 val[16]; u8 pin_len; }; @@ -332,7 +333,6 @@ struct hci_conn { void *l2cap_data; void *sco_data; void *smp_conn; - struct amp_mgr *amp_mgr; struct hci_conn *link; @@ -360,8 +360,7 @@ extern int l2cap_connect_cfm(struct hci_conn *hcon, u8 status); extern int l2cap_disconn_ind(struct hci_conn *hcon); extern int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason); extern int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt); -extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, - u16 flags); +extern int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags); extern int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr); extern int sco_connect_cfm(struct hci_conn *hcon, __u8 status); @@ -430,8 +429,8 @@ enum { static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; - return test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) && - test_bit(HCI_CONN_SSP_ENABLED, &conn->flags); + return (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) && + test_bit(HCI_CONN_SSP_ENABLED, &conn->flags)); } static inline void hci_conn_hash_init(struct hci_dev *hdev) @@ -641,19 +640,6 @@ static inline void hci_set_drvdata(struct hci_dev *hdev, void *data) dev_set_drvdata(&hdev->dev, data); } -/* hci_dev_list shall be locked */ -static inline uint8_t __hci_num_ctrl(void) -{ - uint8_t count = 0; - struct list_head *p; - - list_for_each(p, &hci_dev_list) { - count++; - } - - return count; -} - struct hci_dev *hci_dev_get(int index); struct hci_dev *hci_get_route(bdaddr_t *src, bdaddr_t *dst); @@ -675,8 +661,7 @@ int hci_get_conn_info(struct hci_dev *hdev, void __user *arg); int hci_get_auth_info(struct hci_dev *hdev, void __user *arg); int hci_inquiry(void __user *arg); -struct bdaddr_list 
*hci_blacklist_lookup(struct hci_dev *hdev, - bdaddr_t *bdaddr); +struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr); int hci_blacklist_clear(struct hci_dev *hdev); int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type); diff --git a/trunk/include/net/bluetooth/l2cap.h b/trunk/include/net/bluetooth/l2cap.h index d80e3f0691b4..1c7d1cd5e679 100644 --- a/trunk/include/net/bluetooth/l2cap.h +++ b/trunk/include/net/bluetooth/l2cap.h @@ -40,11 +40,11 @@ #define L2CAP_DEFAULT_MONITOR_TO 12000 /* 12 seconds */ #define L2CAP_DEFAULT_MAX_PDU_SIZE 1009 /* Sized for 3-DH5 packet */ #define L2CAP_DEFAULT_ACK_TO 200 +#define L2CAP_LE_DEFAULT_MTU 23 #define L2CAP_DEFAULT_MAX_SDU_SIZE 0xFFFF #define L2CAP_DEFAULT_SDU_ITIME 0xFFFFFFFF #define L2CAP_DEFAULT_ACC_LAT 0xFFFFFFFF #define L2CAP_BREDR_MAX_PAYLOAD 1019 /* 3-DH5 packet */ -#define L2CAP_LE_MIN_MTU 23 #define L2CAP_DISC_TIMEOUT msecs_to_jiffies(100) #define L2CAP_DISC_REJ_TIMEOUT msecs_to_jiffies(5000) @@ -52,8 +52,6 @@ #define L2CAP_CONN_TIMEOUT msecs_to_jiffies(40000) #define L2CAP_INFO_TIMEOUT msecs_to_jiffies(4000) -#define L2CAP_A2MP_DEFAULT_MTU 670 - /* L2CAP socket address */ struct sockaddr_l2 { sa_family_t l2_family; @@ -231,14 +229,9 @@ struct l2cap_conn_rsp { __le16 status; } __packed; -/* protocol/service multiplexer (PSM) */ -#define L2CAP_PSM_SDP 0x0001 -#define L2CAP_PSM_RFCOMM 0x0003 - /* channel indentifier */ #define L2CAP_CID_SIGNALING 0x0001 #define L2CAP_CID_CONN_LESS 0x0002 -#define L2CAP_CID_A2MP 0x0003 #define L2CAP_CID_LE_DATA 0x0004 #define L2CAP_CID_LE_SIGNALING 0x0005 #define L2CAP_CID_SMP 0x0006 @@ -278,9 +271,6 @@ struct l2cap_conf_rsp { #define L2CAP_CONF_PENDING 0x0004 #define L2CAP_CONF_EFS_REJECT 0x0005 -/* configuration req/rsp continuation flag */ -#define L2CAP_CONF_FLAG_CONTINUATION 0x0001 - struct l2cap_conf_opt { __u8 type; __u8 len; @@ -429,6 +419,11 @@ struct l2cap_seq_list { #define L2CAP_SEQ_LIST_CLEAR 0xFFFF #define L2CAP_SEQ_LIST_TAIL 0x8000 +struct srej_list { + __u16 tx_seq; + struct list_head list; +}; + struct l2cap_chan { struct sock *sk; @@ -480,12 +475,14 @@ struct l2cap_chan { __u16 expected_ack_seq; __u16 expected_tx_seq; __u16 buffer_seq; + __u16 buffer_seq_srej; __u16 srej_save_reqseq; __u16 last_acked_seq; __u16 frames_sent; __u16 unacked_frames; __u8 retry_count; __u16 srej_queue_next; + __u8 num_acked; __u16 sdu_len; struct sk_buff *sdu; struct sk_buff *sdu_last_frag; @@ -518,6 +515,7 @@ struct l2cap_chan { struct sk_buff_head srej_q; struct l2cap_seq_list srej_list; struct l2cap_seq_list retrans_list; + struct list_head srej_l; struct list_head list; struct list_head global_l; @@ -530,14 +528,10 @@ struct l2cap_chan { struct l2cap_ops { char *name; - struct l2cap_chan *(*new_connection) (struct l2cap_chan *chan); - int (*recv) (struct l2cap_chan * chan, - struct sk_buff *skb); - void (*teardown) (struct l2cap_chan *chan, int err); - void (*close) (struct l2cap_chan *chan); - void (*state_change) (struct l2cap_chan *chan, - int state); - void (*ready) (struct l2cap_chan *chan); + struct l2cap_chan *(*new_connection) (void *data); + int (*recv) (void *data, struct sk_buff *skb); + void (*close) (void *data); + void (*state_change) (void *data, int state); struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan, unsigned long len, int nb); }; @@ -581,7 +575,6 @@ struct l2cap_conn { #define L2CAP_CHAN_RAW 1 #define L2CAP_CHAN_CONN_LESS 2 #define L2CAP_CHAN_CONN_ORIENTED 3 
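With the callback signatures shown on the '+' side of the l2cap_ops hunk above, an L2CAP user hands the core a table of void *data callbacks plus an skb allocator. A minimal sketch of such a table with do-nothing stub implementations follows; the my_* names are placeholders, not the real socket-layer callbacks.

static struct l2cap_chan *my_new_connection(void *data)
{
	/* 'data' is whatever the owner stored in chan->data */
	return NULL;			/* refuse incoming channels in this sketch */
}

static int my_recv(void *data, struct sk_buff *skb)
{
	kfree_skb(skb);			/* consume and drop, for illustration only */
	return 0;
}

static void my_close(void *data)
{
}

static void my_state_change(void *data, int state)
{
}

static struct sk_buff *my_alloc_skb(struct l2cap_chan *chan,
				    unsigned long len, int nb)
{
	return bt_skb_alloc(len, GFP_KERNEL);
}

static struct l2cap_ops my_ops = {
	.name		= "example L2CAP user",
	.new_connection	= my_new_connection,
	.recv		= my_recv,
	.close		= my_close,
	.state_change	= my_state_change,
	.alloc_skb	= my_alloc_skb,
};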
-#define L2CAP_CHAN_CONN_FIX_A2MP 4 /* ----- L2CAP socket info ----- */ #define l2cap_pi(sk) ((struct l2cap_pinfo *) sk) @@ -604,7 +597,6 @@ enum { CONF_EWS_RECV, CONF_LOC_CONF_PEND, CONF_REM_CONF_PEND, - CONF_NOT_COMPLETE, }; #define L2CAP_CONF_MAX_CONF_REQ 2 @@ -721,7 +713,11 @@ static inline bool l2cap_clear_timer(struct l2cap_chan *chan, #define __set_chan_timer(c, t) l2cap_set_timer(c, &c->chan_timer, (t)) #define __clear_chan_timer(c) l2cap_clear_timer(c, &c->chan_timer) +#define __set_retrans_timer(c) l2cap_set_timer(c, &c->retrans_timer, \ + msecs_to_jiffies(L2CAP_DEFAULT_RETRANS_TO)); #define __clear_retrans_timer(c) l2cap_clear_timer(c, &c->retrans_timer) +#define __set_monitor_timer(c) l2cap_set_timer(c, &c->monitor_timer, \ + msecs_to_jiffies(L2CAP_DEFAULT_MONITOR_TO)); #define __clear_monitor_timer(c) l2cap_clear_timer(c, &c->monitor_timer) #define __set_ack_timer(c) l2cap_set_timer(c, &chan->ack_timer, \ msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO)); @@ -740,17 +736,173 @@ static inline __u16 __next_seq(struct l2cap_chan *chan, __u16 seq) return (seq + 1) % (chan->tx_win_max + 1); } -static inline struct l2cap_chan *l2cap_chan_no_new_connection(struct l2cap_chan *chan) +static inline int l2cap_tx_window_full(struct l2cap_chan *ch) +{ + int sub; + + sub = (ch->next_tx_seq - ch->expected_ack_seq) % 64; + + if (sub < 0) + sub += 64; + + return sub == ch->remote_tx_win; +} + +static inline __u16 __get_reqseq(struct l2cap_chan *chan, __u32 ctrl) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (ctrl & L2CAP_EXT_CTRL_REQSEQ) >> + L2CAP_EXT_CTRL_REQSEQ_SHIFT; + else + return (ctrl & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT; +} + +static inline __u32 __set_reqseq(struct l2cap_chan *chan, __u32 reqseq) { - return NULL; + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) & + L2CAP_EXT_CTRL_REQSEQ; + else + return (reqseq << L2CAP_CTRL_REQSEQ_SHIFT) & L2CAP_CTRL_REQSEQ; } -static inline void l2cap_chan_no_teardown(struct l2cap_chan *chan, int err) +static inline __u16 __get_txseq(struct l2cap_chan *chan, __u32 ctrl) { + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (ctrl & L2CAP_EXT_CTRL_TXSEQ) >> + L2CAP_EXT_CTRL_TXSEQ_SHIFT; + else + return (ctrl & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT; } -static inline void l2cap_chan_no_ready(struct l2cap_chan *chan) +static inline __u32 __set_txseq(struct l2cap_chan *chan, __u32 txseq) { + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) & + L2CAP_EXT_CTRL_TXSEQ; + else + return (txseq << L2CAP_CTRL_TXSEQ_SHIFT) & L2CAP_CTRL_TXSEQ; +} + +static inline bool __is_sframe(struct l2cap_chan *chan, __u32 ctrl) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return ctrl & L2CAP_EXT_CTRL_FRAME_TYPE; + else + return ctrl & L2CAP_CTRL_FRAME_TYPE; +} + +static inline __u32 __set_sframe(struct l2cap_chan *chan) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return L2CAP_EXT_CTRL_FRAME_TYPE; + else + return L2CAP_CTRL_FRAME_TYPE; +} + +static inline __u8 __get_ctrl_sar(struct l2cap_chan *chan, __u32 ctrl) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (ctrl & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT; + else + return (ctrl & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT; +} + +static inline __u32 __set_ctrl_sar(struct l2cap_chan *chan, __u32 sar) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (sar << L2CAP_EXT_CTRL_SAR_SHIFT) & L2CAP_EXT_CTRL_SAR; + else + return (sar << L2CAP_CTRL_SAR_SHIFT) & L2CAP_CTRL_SAR; +} + +static inline bool 
__is_sar_start(struct l2cap_chan *chan, __u32 ctrl) +{ + return __get_ctrl_sar(chan, ctrl) == L2CAP_SAR_START; +} + +static inline __u32 __get_sar_mask(struct l2cap_chan *chan) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return L2CAP_EXT_CTRL_SAR; + else + return L2CAP_CTRL_SAR; +} + +static inline __u8 __get_ctrl_super(struct l2cap_chan *chan, __u32 ctrl) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (ctrl & L2CAP_EXT_CTRL_SUPERVISE) >> + L2CAP_EXT_CTRL_SUPER_SHIFT; + else + return (ctrl & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT; +} + +static inline __u32 __set_ctrl_super(struct l2cap_chan *chan, __u32 super) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return (super << L2CAP_EXT_CTRL_SUPER_SHIFT) & + L2CAP_EXT_CTRL_SUPERVISE; + else + return (super << L2CAP_CTRL_SUPER_SHIFT) & + L2CAP_CTRL_SUPERVISE; +} + +static inline __u32 __set_ctrl_final(struct l2cap_chan *chan) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return L2CAP_EXT_CTRL_FINAL; + else + return L2CAP_CTRL_FINAL; +} + +static inline bool __is_ctrl_final(struct l2cap_chan *chan, __u32 ctrl) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return ctrl & L2CAP_EXT_CTRL_FINAL; + else + return ctrl & L2CAP_CTRL_FINAL; +} + +static inline __u32 __set_ctrl_poll(struct l2cap_chan *chan) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return L2CAP_EXT_CTRL_POLL; + else + return L2CAP_CTRL_POLL; +} + +static inline bool __is_ctrl_poll(struct l2cap_chan *chan, __u32 ctrl) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return ctrl & L2CAP_EXT_CTRL_POLL; + else + return ctrl & L2CAP_CTRL_POLL; +} + +static inline __u32 __get_control(struct l2cap_chan *chan, void *p) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return get_unaligned_le32(p); + else + return get_unaligned_le16(p); +} + +static inline void __put_control(struct l2cap_chan *chan, __u32 control, + void *p) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return put_unaligned_le32(control, p); + else + return put_unaligned_le16(control, p); +} + +static inline __u8 __ctrl_size(struct l2cap_chan *chan) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return L2CAP_EXT_HDR_SIZE - L2CAP_HDR_SIZE; + else + return L2CAP_ENH_HDR_SIZE - L2CAP_HDR_SIZE; } extern bool disable_ertm; @@ -774,8 +926,5 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, void l2cap_chan_busy(struct l2cap_chan *chan, int busy); int l2cap_chan_check_security(struct l2cap_chan *chan); void l2cap_chan_set_defaults(struct l2cap_chan *chan); -int l2cap_ertm_init(struct l2cap_chan *chan); -void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan); -void l2cap_chan_del(struct l2cap_chan *chan, int err); #endif /* __L2CAP_H */ diff --git a/trunk/include/net/mac80211.h b/trunk/include/net/mac80211.h index 6914f9978aea..d152f54064fd 100644 --- a/trunk/include/net/mac80211.h +++ b/trunk/include/net/mac80211.h @@ -1945,11 +1945,6 @@ enum ieee80211_rate_control_changed { * to also unregister the device. If it returns 1, then mac80211 * will also go through the regular complete restart on resume. * - * @set_wakeup: Enable or disable wakeup when WoWLAN configuration is - * modified. The reason is that device_set_wakeup_enable() is - * supposed to be called when the configuration changes, not only - * in suspend(). - * * @add_interface: Called when a netdevice attached to the hardware is * enabled. Because it is not called for monitor mode devices, @start * and @stop must be implemented. 
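Stepping back to the L2CAP control-field helpers that make up most of the chunk above: they exist so the same ERTM code can drive either the 16-bit enhanced control field or the 32-bit extended control field, selected per channel by FLAG_EXT_CTRL. Here is a minimal sketch of composing and writing an I-frame control word purely through those helpers; example_put_iframe_control() is illustrative, L2CAP_SAR_UNSEGMENTED is the "not segmented" SAR value from the same header, and the skb is assumed to already have room for the header.

static void example_put_iframe_control(struct l2cap_chan *chan,
				       struct sk_buff *skb,
				       u16 txseq, u16 reqseq)
{
	u32 control;

	control = __set_txseq(chan, txseq) |	/* this frame's sequence number */
		  __set_reqseq(chan, reqseq) |	/* acknowledges peer frames before reqseq */
		  __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);

	/* Writes 2 or 4 bytes depending on FLAG_EXT_CTRL, matching what
	 * __ctrl_size() reports for this channel. */
	__put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
}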
@@ -2979,7 +2974,6 @@ __le16 ieee80211_ctstoself_duration(struct ieee80211_hw *hw, * ieee80211_generic_frame_duration - Calculate the duration field for a frame * @hw: pointer obtained from ieee80211_alloc_hw(). * @vif: &struct ieee80211_vif pointer from the add_interface callback. - * @band: the band to calculate the frame duration on * @frame_len: the length of the frame. * @rate: the rate at which the frame is going to be transmitted. * diff --git a/trunk/net/bluetooth/Makefile b/trunk/net/bluetooth/Makefile index fa6d94a4602a..2dc5a5700f53 100644 --- a/trunk/net/bluetooth/Makefile +++ b/trunk/net/bluetooth/Makefile @@ -9,5 +9,4 @@ obj-$(CONFIG_BT_CMTP) += cmtp/ obj-$(CONFIG_BT_HIDP) += hidp/ bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ - hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o \ - a2mp.o + hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o sco.o lib.o diff --git a/trunk/net/bluetooth/a2mp.c b/trunk/net/bluetooth/a2mp.c deleted file mode 100644 index fb93250b3938..000000000000 --- a/trunk/net/bluetooth/a2mp.c +++ /dev/null @@ -1,568 +0,0 @@ -/* - Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved. - Copyright (c) 2011,2012 Intel Corp. - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License version 2 and - only version 2 as published by the Free Software Foundation. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. -*/ - -#include -#include -#include -#include - -/* A2MP build & send command helper functions */ -static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data) -{ - struct a2mp_cmd *cmd; - int plen; - - plen = sizeof(*cmd) + len; - cmd = kzalloc(plen, GFP_KERNEL); - if (!cmd) - return NULL; - - cmd->code = code; - cmd->ident = ident; - cmd->len = cpu_to_le16(len); - - memcpy(cmd->data, data, len); - - return cmd; -} - -static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, - void *data) -{ - struct l2cap_chan *chan = mgr->a2mp_chan; - struct a2mp_cmd *cmd; - u16 total_len = len + sizeof(*cmd); - struct kvec iv; - struct msghdr msg; - - cmd = __a2mp_build(code, ident, len, data); - if (!cmd) - return; - - iv.iov_base = cmd; - iv.iov_len = total_len; - - memset(&msg, 0, sizeof(msg)); - - msg.msg_iov = (struct iovec *) &iv; - msg.msg_iovlen = 1; - - l2cap_chan_send(chan, &msg, total_len, 0); - - kfree(cmd); -} - -static inline void __a2mp_cl_bredr(struct a2mp_cl *cl) -{ - cl->id = 0; - cl->type = 0; - cl->status = 1; -} - -/* hci_dev_list shall be locked */ -static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl, u8 num_ctrl) -{ - int i = 0; - struct hci_dev *hdev; - - __a2mp_cl_bredr(cl); - - list_for_each_entry(hdev, &hci_dev_list, list) { - /* Iterate through AMP controllers */ - if (hdev->id == HCI_BREDR_ID) - continue; - - /* Starting from second entry */ - if (++i >= num_ctrl) - return; - - cl[i].id = hdev->id; - cl[i].type = hdev->amp_type; - cl[i].status = hdev->amp_status; - } -} - -/* Processing A2MP messages */ -static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_cmd_rej *rej = (void *) skb->data; - - if (le16_to_cpu(hdr->len) < sizeof(*rej)) - return -EINVAL; - - BT_DBG("ident %d reason %d", hdr->ident, le16_to_cpu(rej->reason)); - - 
skb_pull(skb, sizeof(*rej)); - - return 0; -} - -static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_discov_req *req = (void *) skb->data; - u16 len = le16_to_cpu(hdr->len); - struct a2mp_discov_rsp *rsp; - u16 ext_feat; - u8 num_ctrl; - - if (len < sizeof(*req)) - return -EINVAL; - - skb_pull(skb, sizeof(*req)); - - ext_feat = le16_to_cpu(req->ext_feat); - - BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat); - - /* check that packet is not broken for now */ - while (ext_feat & A2MP_FEAT_EXT) { - if (len < sizeof(ext_feat)) - return -EINVAL; - - ext_feat = get_unaligned_le16(skb->data); - BT_DBG("efm 0x%4.4x", ext_feat); - len -= sizeof(ext_feat); - skb_pull(skb, sizeof(ext_feat)); - } - - read_lock(&hci_dev_list_lock); - - num_ctrl = __hci_num_ctrl(); - len = num_ctrl * sizeof(struct a2mp_cl) + sizeof(*rsp); - rsp = kmalloc(len, GFP_ATOMIC); - if (!rsp) { - read_unlock(&hci_dev_list_lock); - return -ENOMEM; - } - - rsp->mtu = __constant_cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU); - rsp->ext_feat = 0; - - __a2mp_add_cl(mgr, rsp->cl, num_ctrl); - - read_unlock(&hci_dev_list_lock); - - a2mp_send(mgr, A2MP_DISCOVER_RSP, hdr->ident, len, rsp); - - kfree(rsp); - return 0; -} - -static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_cl *cl = (void *) skb->data; - - while (skb->len >= sizeof(*cl)) { - BT_DBG("Controller id %d type %d status %d", cl->id, cl->type, - cl->status); - cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl)); - } - - /* TODO send A2MP_CHANGE_RSP */ - - return 0; -} - -static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_info_req *req = (void *) skb->data; - struct a2mp_info_rsp rsp; - struct hci_dev *hdev; - - if (le16_to_cpu(hdr->len) < sizeof(*req)) - return -EINVAL; - - BT_DBG("id %d", req->id); - - rsp.id = req->id; - rsp.status = A2MP_STATUS_INVALID_CTRL_ID; - - hdev = hci_dev_get(req->id); - if (hdev && hdev->amp_type != HCI_BREDR) { - rsp.status = 0; - rsp.total_bw = cpu_to_le32(hdev->amp_total_bw); - rsp.max_bw = cpu_to_le32(hdev->amp_max_bw); - rsp.min_latency = cpu_to_le32(hdev->amp_min_latency); - rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap); - rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size); - } - - if (hdev) - hci_dev_put(hdev); - - a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp), &rsp); - - skb_pull(skb, sizeof(*req)); - return 0; -} - -static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_amp_assoc_req *req = (void *) skb->data; - struct hci_dev *hdev; - - if (le16_to_cpu(hdr->len) < sizeof(*req)) - return -EINVAL; - - BT_DBG("id %d", req->id); - - hdev = hci_dev_get(req->id); - if (!hdev || hdev->amp_type == HCI_BREDR) { - struct a2mp_amp_assoc_rsp rsp; - rsp.id = req->id; - rsp.status = A2MP_STATUS_INVALID_CTRL_ID; - - a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp), - &rsp); - goto clean; - } - - /* Placeholder for HCI Read AMP Assoc */ - -clean: - if (hdev) - hci_dev_put(hdev); - - skb_pull(skb, sizeof(*req)); - return 0; -} - -static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_physlink_req *req = (void *) skb->data; - - struct a2mp_physlink_rsp rsp; - struct hci_dev *hdev; - - if (le16_to_cpu(hdr->len) < sizeof(*req)) - return -EINVAL; - - BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id); - - rsp.local_id = 
req->remote_id; - rsp.remote_id = req->local_id; - - hdev = hci_dev_get(req->remote_id); - if (!hdev || hdev->amp_type != HCI_AMP) { - rsp.status = A2MP_STATUS_INVALID_CTRL_ID; - goto send_rsp; - } - - /* TODO process physlink create */ - - rsp.status = A2MP_STATUS_SUCCESS; - -send_rsp: - if (hdev) - hci_dev_put(hdev); - - a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident, sizeof(rsp), - &rsp); - - skb_pull(skb, le16_to_cpu(hdr->len)); - return 0; -} - -static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - struct a2mp_physlink_req *req = (void *) skb->data; - struct a2mp_physlink_rsp rsp; - struct hci_dev *hdev; - - if (le16_to_cpu(hdr->len) < sizeof(*req)) - return -EINVAL; - - BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id); - - rsp.local_id = req->remote_id; - rsp.remote_id = req->local_id; - rsp.status = A2MP_STATUS_SUCCESS; - - hdev = hci_dev_get(req->local_id); - if (!hdev) { - rsp.status = A2MP_STATUS_INVALID_CTRL_ID; - goto send_rsp; - } - - /* TODO Disconnect Phys Link here */ - - hci_dev_put(hdev); - -send_rsp: - a2mp_send(mgr, A2MP_DISCONNPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp); - - skb_pull(skb, sizeof(*req)); - return 0; -} - -static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb, - struct a2mp_cmd *hdr) -{ - BT_DBG("ident %d code %d", hdr->ident, hdr->code); - - skb_pull(skb, le16_to_cpu(hdr->len)); - return 0; -} - -/* Handle A2MP signalling */ -static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) -{ - struct a2mp_cmd *hdr = (void *) skb->data; - struct amp_mgr *mgr = chan->data; - int err = 0; - - amp_mgr_get(mgr); - - while (skb->len >= sizeof(*hdr)) { - struct a2mp_cmd *hdr = (void *) skb->data; - u16 len = le16_to_cpu(hdr->len); - - BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, len); - - skb_pull(skb, sizeof(*hdr)); - - if (len > skb->len || !hdr->ident) { - err = -EINVAL; - break; - } - - mgr->ident = hdr->ident; - - switch (hdr->code) { - case A2MP_COMMAND_REJ: - a2mp_command_rej(mgr, skb, hdr); - break; - - case A2MP_DISCOVER_REQ: - err = a2mp_discover_req(mgr, skb, hdr); - break; - - case A2MP_CHANGE_NOTIFY: - err = a2mp_change_notify(mgr, skb, hdr); - break; - - case A2MP_GETINFO_REQ: - err = a2mp_getinfo_req(mgr, skb, hdr); - break; - - case A2MP_GETAMPASSOC_REQ: - err = a2mp_getampassoc_req(mgr, skb, hdr); - break; - - case A2MP_CREATEPHYSLINK_REQ: - err = a2mp_createphyslink_req(mgr, skb, hdr); - break; - - case A2MP_DISCONNPHYSLINK_REQ: - err = a2mp_discphyslink_req(mgr, skb, hdr); - break; - - case A2MP_CHANGE_RSP: - case A2MP_DISCOVER_RSP: - case A2MP_GETINFO_RSP: - case A2MP_GETAMPASSOC_RSP: - case A2MP_CREATEPHYSLINK_RSP: - case A2MP_DISCONNPHYSLINK_RSP: - err = a2mp_cmd_rsp(mgr, skb, hdr); - break; - - default: - BT_ERR("Unknown A2MP sig cmd 0x%2.2x", hdr->code); - err = -EINVAL; - break; - } - } - - if (err) { - struct a2mp_cmd_rej rej; - rej.reason = __constant_cpu_to_le16(0); - - BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err); - - a2mp_send(mgr, A2MP_COMMAND_REJ, hdr->ident, sizeof(rej), - &rej); - } - - /* Always free skb and return success error code to prevent - from sending L2CAP Disconnect over A2MP channel */ - kfree_skb(skb); - - amp_mgr_put(mgr); - - return 0; -} - -static void a2mp_chan_close_cb(struct l2cap_chan *chan) -{ - l2cap_chan_destroy(chan); -} - -static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state) -{ - struct amp_mgr *mgr = chan->data; - - if (!mgr) - return; - - BT_DBG("chan 
%p state %s", chan, state_to_string(state)); - - chan->state = state; - - switch (state) { - case BT_CLOSED: - if (mgr) - amp_mgr_put(mgr); - break; - } -} - -static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan, - unsigned long len, int nb) -{ - return bt_skb_alloc(len, GFP_KERNEL); -} - -static struct l2cap_ops a2mp_chan_ops = { - .name = "L2CAP A2MP channel", - .recv = a2mp_chan_recv_cb, - .close = a2mp_chan_close_cb, - .state_change = a2mp_chan_state_change_cb, - .alloc_skb = a2mp_chan_alloc_skb_cb, - - /* Not implemented for A2MP */ - .new_connection = l2cap_chan_no_new_connection, - .teardown = l2cap_chan_no_teardown, - .ready = l2cap_chan_no_ready, -}; - -static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn) -{ - struct l2cap_chan *chan; - int err; - - chan = l2cap_chan_create(); - if (!chan) - return NULL; - - BT_DBG("chan %p", chan); - - chan->chan_type = L2CAP_CHAN_CONN_FIX_A2MP; - chan->flush_to = L2CAP_DEFAULT_FLUSH_TO; - - chan->ops = &a2mp_chan_ops; - - l2cap_chan_set_defaults(chan); - chan->remote_max_tx = chan->max_tx; - chan->remote_tx_win = chan->tx_win; - - chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO; - chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; - - skb_queue_head_init(&chan->tx_q); - - chan->mode = L2CAP_MODE_ERTM; - - err = l2cap_ertm_init(chan); - if (err < 0) { - l2cap_chan_del(chan, 0); - return NULL; - } - - chan->conf_state = 0; - - l2cap_chan_add(conn, chan); - - chan->remote_mps = chan->omtu; - chan->mps = chan->omtu; - - chan->state = BT_CONNECTED; - - return chan; -} - -/* AMP Manager functions */ -void amp_mgr_get(struct amp_mgr *mgr) -{ - BT_DBG("mgr %p", mgr); - - kref_get(&mgr->kref); -} - -static void amp_mgr_destroy(struct kref *kref) -{ - struct amp_mgr *mgr = container_of(kref, struct amp_mgr, kref); - - BT_DBG("mgr %p", mgr); - - kfree(mgr); -} - -int amp_mgr_put(struct amp_mgr *mgr) -{ - BT_DBG("mgr %p", mgr); - - return kref_put(&mgr->kref, &_mgr_destroy); -} - -static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn) -{ - struct amp_mgr *mgr; - struct l2cap_chan *chan; - - mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); - if (!mgr) - return NULL; - - BT_DBG("conn %p mgr %p", conn, mgr); - - mgr->l2cap_conn = conn; - - chan = a2mp_chan_open(conn); - if (!chan) { - kfree(mgr); - return NULL; - } - - mgr->a2mp_chan = chan; - chan->data = mgr; - - conn->hcon->amp_mgr = mgr; - - kref_init(&mgr->kref); - - return mgr; -} - -struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn, - struct sk_buff *skb) -{ - struct amp_mgr *mgr; - - mgr = amp_mgr_create(conn); - if (!mgr) { - BT_ERR("Could not create AMP manager"); - return NULL; - } - - BT_DBG("mgr: %p chan %p", mgr, mgr->a2mp_chan); - - return mgr->a2mp_chan; -} diff --git a/trunk/net/bluetooth/af_bluetooth.c b/trunk/net/bluetooth/af_bluetooth.c index f7db5792ec64..3e18af4dadc4 100644 --- a/trunk/net/bluetooth/af_bluetooth.c +++ b/trunk/net/bluetooth/af_bluetooth.c @@ -25,7 +25,18 @@ /* Bluetooth address family and sockets. 
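The amp_mgr life cycle above is a textbook kref pattern: kref_init() when amp_mgr_create() builds the object, amp_mgr_get() wrapping kref_get() for every new user, and amp_mgr_put() dropping the reference through kref_put() with amp_mgr_destroy() as the release callback that container_of()s its way back to the object. A self-contained sketch of the same pattern; struct example_mgr and the example_* names are hypothetical.

#include <linux/kref.h>
#include <linux/slab.h>

struct example_mgr {
	struct kref kref;
	/* ... payload ... */
};

static void example_mgr_release(struct kref *kref)
{
	/* invoked by kref_put() when the last reference drops */
	struct example_mgr *mgr = container_of(kref, struct example_mgr, kref);

	kfree(mgr);
}

static struct example_mgr *example_mgr_create(void)
{
	struct example_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);

	if (mgr)
		kref_init(&mgr->kref);		/* refcount starts at 1 */
	return mgr;
}

static void example_mgr_get(struct example_mgr *mgr)
{
	kref_get(&mgr->kref);
}

static int example_mgr_put(struct example_mgr *mgr)
{
	/* returns 1 if this call released the object, 0 otherwise */
	return kref_put(&mgr->kref, example_mgr_release);
}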
*/ #include + +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include #include @@ -407,8 +418,7 @@ static inline unsigned int bt_accept_poll(struct sock *parent) return 0; } -unsigned int bt_sock_poll(struct file *file, struct socket *sock, - poll_table *wait) +unsigned int bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; unsigned int mask = 0; diff --git a/trunk/net/bluetooth/bnep/core.c b/trunk/net/bluetooth/bnep/core.c index 4a6620bc1570..031d7d656754 100644 --- a/trunk/net/bluetooth/bnep/core.c +++ b/trunk/net/bluetooth/bnep/core.c @@ -26,9 +26,26 @@ */ #include + +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include + +#include #include + +#include #include +#include + #include #include @@ -289,7 +306,7 @@ static u8 __bnep_rx_hlen[] = { ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */ }; -static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) +static inline int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) { struct net_device *dev = s->dev; struct sk_buff *nskb; @@ -387,7 +404,7 @@ static u8 __bnep_tx_types[] = { BNEP_COMPRESSED }; -static int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb) +static inline int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb) { struct ethhdr *eh = (void *) skb->data; struct socket *sock = s->sock; diff --git a/trunk/net/bluetooth/bnep/netdev.c b/trunk/net/bluetooth/bnep/netdev.c index 98f86f91d47c..bc4086480d97 100644 --- a/trunk/net/bluetooth/bnep/netdev.c +++ b/trunk/net/bluetooth/bnep/netdev.c @@ -25,8 +25,16 @@ SOFTWARE IS DISCLAIMED. */ -#include +#include +#include + +#include +#include #include +#include +#include + +#include #include #include @@ -120,7 +128,7 @@ static void bnep_net_timeout(struct net_device *dev) } #ifdef CONFIG_BT_BNEP_MC_FILTER -static int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) +static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) { struct ethhdr *eh = (void *) skb->data; @@ -132,7 +140,7 @@ static int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) #ifdef CONFIG_BT_BNEP_PROTO_FILTER /* Determine ether protocol. Based on eth_type_trans. */ -static u16 bnep_net_eth_proto(struct sk_buff *skb) +static inline u16 bnep_net_eth_proto(struct sk_buff *skb) { struct ethhdr *eh = (void *) skb->data; u16 proto = ntohs(eh->h_proto); @@ -146,7 +154,7 @@ static u16 bnep_net_eth_proto(struct sk_buff *skb) return ETH_P_802_2; } -static int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s) +static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s) { u16 proto = bnep_net_eth_proto(skb); struct bnep_proto_filter *f = s->proto_filter; diff --git a/trunk/net/bluetooth/bnep/sock.c b/trunk/net/bluetooth/bnep/sock.c index 5e5f5b410e0b..180bfc45810d 100644 --- a/trunk/net/bluetooth/bnep/sock.c +++ b/trunk/net/bluetooth/bnep/sock.c @@ -24,8 +24,24 @@ SOFTWARE IS DISCLAIMED. */ -#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include +#include + #include "bnep.h" diff --git a/trunk/net/bluetooth/hci_conn.c b/trunk/net/bluetooth/hci_conn.c index 2fcced377e50..3f18a6ed9731 100644 --- a/trunk/net/bluetooth/hci_conn.c +++ b/trunk/net/bluetooth/hci_conn.c @@ -24,11 +24,24 @@ /* Bluetooth HCI connection handling. 
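The hci_conn.c hunk that begins here repeatedly swaps cpu_to_le16()/cpu_to_le32() for their __constant_* spellings on the command parameters visible just below. Both produce the same bytes for a compile-time constant; the point of either is that every multi-byte field of a __packed HCI command is little-endian on the wire, so the explicit conversion keeps the code correct on big-endian hosts. A short sketch of the fill pattern, using the LE create-connection parameter values shown in the hunk; example_fill_le_create_conn() is illustrative.

#include <linux/string.h>
#include <net/bluetooth/hci.h>

static void example_fill_le_create_conn(struct hci_cp_le_create_conn *cp)
{
	memset(cp, 0, sizeof(*cp));

	/* host-order constants are converted explicitly; 0x0060 is stored
	 * as the bytes 0x60 0x00 regardless of CPU endianness */
	cp->scan_interval       = cpu_to_le16(0x0060);
	cp->scan_window         = cpu_to_le16(0x0030);
	cp->conn_interval_min   = cpu_to_le16(0x0028);
	cp->conn_interval_max   = cpu_to_le16(0x0038);
	cp->supervision_timeout = cpu_to_le16(0x002a);
}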
*/ -#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include #include #include -#include static void hci_le_connect(struct hci_conn *conn) { @@ -41,15 +54,15 @@ static void hci_le_connect(struct hci_conn *conn) conn->sec_level = BT_SECURITY_LOW; memset(&cp, 0, sizeof(cp)); - cp.scan_interval = __constant_cpu_to_le16(0x0060); - cp.scan_window = __constant_cpu_to_le16(0x0030); + cp.scan_interval = cpu_to_le16(0x0060); + cp.scan_window = cpu_to_le16(0x0030); bacpy(&cp.peer_addr, &conn->dst); cp.peer_addr_type = conn->dst_type; - cp.conn_interval_min = __constant_cpu_to_le16(0x0028); - cp.conn_interval_max = __constant_cpu_to_le16(0x0038); - cp.supervision_timeout = __constant_cpu_to_le16(0x002a); - cp.min_ce_len = __constant_cpu_to_le16(0x0000); - cp.max_ce_len = __constant_cpu_to_le16(0x0000); + cp.conn_interval_min = cpu_to_le16(0x0028); + cp.conn_interval_max = cpu_to_le16(0x0038); + cp.supervision_timeout = cpu_to_le16(0x002a); + cp.min_ce_len = cpu_to_le16(0x0000); + cp.max_ce_len = cpu_to_le16(0x0000); hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp); } @@ -86,7 +99,7 @@ void hci_acl_connect(struct hci_conn *conn) cp.pscan_rep_mode = ie->data.pscan_rep_mode; cp.pscan_mode = ie->data.pscan_mode; cp.clock_offset = ie->data.clock_offset | - __constant_cpu_to_le16(0x8000); + cpu_to_le16(0x8000); } memcpy(conn->dev_class, ie->data.dev_class, 3); @@ -162,9 +175,9 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle) cp.handle = cpu_to_le16(handle); cp.pkt_type = cpu_to_le16(conn->pkt_type); - cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40); - cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40); - cp.max_latency = __constant_cpu_to_le16(0xffff); + cp.tx_bandwidth = cpu_to_le32(0x00001f40); + cp.rx_bandwidth = cpu_to_le32(0x00001f40); + cp.max_latency = cpu_to_le16(0xffff); cp.voice_setting = cpu_to_le16(hdev->voice_setting); cp.retrans_effort = 0xff; @@ -172,7 +185,7 @@ void hci_setup_sync(struct hci_conn *conn, __u16 handle) } void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, - u16 latency, u16 to_multiplier) + u16 latency, u16 to_multiplier) { struct hci_cp_le_conn_update cp; struct hci_dev *hdev = conn->hdev; @@ -184,14 +197,15 @@ void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, cp.conn_interval_max = cpu_to_le16(max); cp.conn_latency = cpu_to_le16(latency); cp.supervision_timeout = cpu_to_le16(to_multiplier); - cp.min_ce_len = __constant_cpu_to_le16(0x0001); - cp.max_ce_len = __constant_cpu_to_le16(0x0001); + cp.min_ce_len = cpu_to_le16(0x0001); + cp.max_ce_len = cpu_to_le16(0x0001); hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp); } +EXPORT_SYMBOL(hci_le_conn_update); void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], - __u8 ltk[16]) + __u8 ltk[16]) { struct hci_dev *hdev = conn->hdev; struct hci_cp_le_start_enc cp; @@ -207,6 +221,7 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8], hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp); } +EXPORT_SYMBOL(hci_le_start_enc); /* Device _must_ be locked */ void hci_sco_setup(struct hci_conn *conn, __u8 status) @@ -232,7 +247,7 @@ void hci_sco_setup(struct hci_conn *conn, __u8 status) static void hci_conn_timeout(struct work_struct *work) { struct hci_conn *conn = container_of(work, struct hci_conn, - disc_work.work); + disc_work.work); __u8 reason; BT_DBG("conn %p state %s", conn, state_to_string(conn->state)); @@ -280,9 +295,9 @@ static void 
hci_conn_enter_sniff_mode(struct hci_conn *conn) if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) { struct hci_cp_sniff_subrate cp; cp.handle = cpu_to_le16(conn->handle); - cp.max_latency = __constant_cpu_to_le16(0); - cp.min_remote_timeout = __constant_cpu_to_le16(0); - cp.min_local_timeout = __constant_cpu_to_le16(0); + cp.max_latency = cpu_to_le16(0); + cp.min_remote_timeout = cpu_to_le16(0); + cp.min_local_timeout = cpu_to_le16(0); hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp); } @@ -291,8 +306,8 @@ static void hci_conn_enter_sniff_mode(struct hci_conn *conn) cp.handle = cpu_to_le16(conn->handle); cp.max_interval = cpu_to_le16(hdev->sniff_max_interval); cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); - cp.attempt = __constant_cpu_to_le16(4); - cp.timeout = __constant_cpu_to_le16(1); + cp.attempt = cpu_to_le16(4); + cp.timeout = cpu_to_le16(1); hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp); } } @@ -312,7 +327,7 @@ static void hci_conn_auto_accept(unsigned long arg) struct hci_dev *hdev = conn->hdev; hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst), - &conn->dst); + &conn->dst); } struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) @@ -361,7 +376,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst) INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout); setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn); setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept, - (unsigned long) conn); + (unsigned long) conn); atomic_set(&conn->refcnt, 0); @@ -410,10 +425,8 @@ int hci_conn_del(struct hci_conn *conn) } } - hci_chan_list_flush(conn); - if (conn->amp_mgr) - amp_mgr_put(conn->amp_mgr); + hci_chan_list_flush(conn); hci_conn_hash_del(hdev, conn); if (hdev->notify) @@ -441,8 +454,7 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src) read_lock(&hci_dev_list_lock); list_for_each_entry(d, &hci_dev_list, list) { - if (!test_bit(HCI_UP, &d->flags) || - test_bit(HCI_RAW, &d->flags)) + if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags)) continue; /* Simple routing: @@ -483,11 +495,6 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, if (type == LE_LINK) { le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst); if (!le) { - le = hci_conn_hash_lookup_state(hdev, LE_LINK, - BT_CONNECT); - if (le) - return ERR_PTR(-EBUSY); - le = hci_conn_add(hdev, LE_LINK, dst); if (!le) return ERR_PTR(-ENOMEM); @@ -538,7 +545,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, hci_conn_hold(sco); if (acl->state == BT_CONNECTED && - (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { + (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { set_bit(HCI_CONN_POWER_SAVE, &acl->flags); hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON); @@ -553,6 +560,7 @@ struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, return sco; } +EXPORT_SYMBOL(hci_connect); /* Check link security requirement */ int hci_conn_check_link_mode(struct hci_conn *conn) @@ -564,6 +572,7 @@ int hci_conn_check_link_mode(struct hci_conn *conn) return 1; } +EXPORT_SYMBOL(hci_conn_check_link_mode); /* Authenticate remote device */ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) @@ -591,7 +600,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) cp.handle = cpu_to_le16(conn->handle); hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, - sizeof(cp), &cp); + sizeof(cp), 
&cp); if (conn->key_type != 0xff) set_bit(HCI_CONN_REAUTH_PEND, &conn->flags); } @@ -609,7 +618,7 @@ static void hci_conn_encrypt(struct hci_conn *conn) cp.handle = cpu_to_le16(conn->handle); cp.encrypt = 0x01; hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), - &cp); + &cp); } } @@ -639,7 +648,8 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) /* An unauthenticated combination key has sufficient security for security level 1 and 2. */ if (conn->key_type == HCI_LK_UNAUTH_COMBINATION && - (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW)) + (sec_level == BT_SECURITY_MEDIUM || + sec_level == BT_SECURITY_LOW)) goto encrypt; /* A combination key has always sufficient security for the security @@ -647,7 +657,8 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) is generated using maximum PIN code length (16). For pre 2.1 units. */ if (conn->key_type == HCI_LK_COMBINATION && - (sec_level != BT_SECURITY_HIGH || conn->pin_length == 16)) + (sec_level != BT_SECURITY_HIGH || + conn->pin_length == 16)) goto encrypt; auth: @@ -690,11 +701,12 @@ int hci_conn_change_link_key(struct hci_conn *conn) struct hci_cp_change_conn_link_key cp; cp.handle = cpu_to_le16(conn->handle); hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY, - sizeof(cp), &cp); + sizeof(cp), &cp); } return 0; } +EXPORT_SYMBOL(hci_conn_change_link_key); /* Switch role */ int hci_conn_switch_role(struct hci_conn *conn, __u8 role) @@ -740,7 +752,7 @@ void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active) timer: if (hdev->idle_timeout > 0) mod_timer(&conn->idle_timer, - jiffies + msecs_to_jiffies(hdev->idle_timeout)); + jiffies + msecs_to_jiffies(hdev->idle_timeout)); } /* Drop all connection on the device */ @@ -790,7 +802,7 @@ EXPORT_SYMBOL(hci_conn_put_device); int hci_get_conn_list(void __user *arg) { - struct hci_conn *c; + register struct hci_conn *c; struct hci_conn_list_req req, *cl; struct hci_conn_info *ci; struct hci_dev *hdev; diff --git a/trunk/net/bluetooth/hci_core.c b/trunk/net/bluetooth/hci_core.c index 08994ecc3b6a..411ace8e647b 100644 --- a/trunk/net/bluetooth/hci_core.c +++ b/trunk/net/bluetooth/hci_core.c @@ -25,10 +25,28 @@ /* Bluetooth HCI core. */ -#include -#include - +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include + +#include +#include #include #include @@ -47,9 +65,6 @@ DEFINE_RWLOCK(hci_dev_list_lock); LIST_HEAD(hci_cb_list); DEFINE_RWLOCK(hci_cb_list_lock); -/* HCI ID Numbering */ -static DEFINE_IDA(hci_index_ida); - /* ---- HCI notifications ---- */ static void hci_notify(struct hci_dev *hdev, int event) @@ -109,9 +124,8 @@ static void hci_req_cancel(struct hci_dev *hdev, int err) } /* Execute request and wait for completion. 
*/ -static int __hci_request(struct hci_dev *hdev, - void (*req)(struct hci_dev *hdev, unsigned long opt), - unsigned long opt, __u32 timeout) +static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), + unsigned long opt, __u32 timeout) { DECLARE_WAITQUEUE(wait, current); int err = 0; @@ -152,9 +166,8 @@ static int __hci_request(struct hci_dev *hdev, return err; } -static int hci_request(struct hci_dev *hdev, - void (*req)(struct hci_dev *hdev, unsigned long opt), - unsigned long opt, __u32 timeout) +static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt), + unsigned long opt, __u32 timeout) { int ret; @@ -189,7 +202,7 @@ static void bredr_init(struct hci_dev *hdev) /* Mandatory initialization */ /* Reset */ - if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { + if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) { set_bit(HCI_RESET, &hdev->flags); hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL); } @@ -222,7 +235,7 @@ static void bredr_init(struct hci_dev *hdev) hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type); /* Connection accept timeout ~20 secs */ - param = __constant_cpu_to_le16(0x7d00); + param = cpu_to_le16(0x7d00); hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m); bacpy(&cp.bdaddr, BDADDR_ANY); @@ -404,8 +417,7 @@ static void inquiry_cache_flush(struct hci_dev *hdev) INIT_LIST_HEAD(&cache->resolve); } -struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, - bdaddr_t *bdaddr) +struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr) { struct discovery_state *cache = &hdev->discovery; struct inquiry_entry *e; @@ -466,7 +478,7 @@ void hci_inquiry_cache_update_resolve(struct hci_dev *hdev, list_for_each_entry(p, &cache->resolve, list) { if (p->name_state != NAME_PENDING && - abs(p->data.rssi) >= abs(ie->data.rssi)) + abs(p->data.rssi) >= abs(ie->data.rssi)) break; pos = &p->list; } @@ -491,7 +503,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, *ssp = true; if (ie->name_state == NAME_NEEDED && - data->rssi != ie->data.rssi) { + data->rssi != ie->data.rssi) { ie->data.rssi = data->rssi; hci_inquiry_cache_update_resolve(hdev, ie); } @@ -515,7 +527,7 @@ bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, update: if (name_known && ie->name_state != NAME_KNOWN && - ie->name_state != NAME_PENDING) { + ie->name_state != NAME_PENDING) { ie->name_state = NAME_KNOWN; list_del(&ie->list); } @@ -593,7 +605,8 @@ int hci_inquiry(void __user *arg) hci_dev_lock(hdev); if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || - inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) { + inquiry_cache_empty(hdev) || + ir.flags & IREQ_CACHE_FLUSH) { inquiry_cache_flush(hdev); do_inquiry = 1; } @@ -607,9 +620,7 @@ int hci_inquiry(void __user *arg) goto done; } - /* for unlimited number of responses we will use buffer with - * 255 entries - */ + /* for unlimited number of responses we will use buffer with 255 entries */ max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp; /* cache_dump can't sleep. 
Therefore we allocate temp buffer and then @@ -630,7 +641,7 @@ int hci_inquiry(void __user *arg) if (!copy_to_user(ptr, &ir, sizeof(ir))) { ptr += sizeof(ir); if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) * - ir.num_rsp)) + ir.num_rsp)) err = -EFAULT; } else err = -EFAULT; @@ -691,11 +702,11 @@ int hci_dev_open(__u16 dev) hdev->init_last_cmd = 0; ret = __hci_request(hdev, hci_init_req, 0, - msecs_to_jiffies(HCI_INIT_TIMEOUT)); + msecs_to_jiffies(HCI_INIT_TIMEOUT)); if (lmp_host_le_capable(hdev)) ret = __hci_request(hdev, hci_le_init_req, 0, - msecs_to_jiffies(HCI_INIT_TIMEOUT)); + msecs_to_jiffies(HCI_INIT_TIMEOUT)); clear_bit(HCI_INIT, &hdev->flags); } @@ -780,10 +791,10 @@ static int hci_dev_do_close(struct hci_dev *hdev) skb_queue_purge(&hdev->cmd_q); atomic_set(&hdev->cmd_cnt, 1); if (!test_bit(HCI_RAW, &hdev->flags) && - test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { + test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) { set_bit(HCI_INIT, &hdev->flags); __hci_request(hdev, hci_reset_req, 0, - msecs_to_jiffies(250)); + msecs_to_jiffies(250)); clear_bit(HCI_INIT, &hdev->flags); } @@ -873,7 +884,7 @@ int hci_dev_reset(__u16 dev) if (!test_bit(HCI_RAW, &hdev->flags)) ret = __hci_request(hdev, hci_reset_req, 0, - msecs_to_jiffies(HCI_INIT_TIMEOUT)); + msecs_to_jiffies(HCI_INIT_TIMEOUT)); done: hci_req_unlock(hdev); @@ -913,7 +924,7 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) switch (cmd) { case HCISETAUTH: err = hci_request(hdev, hci_auth_req, dr.dev_opt, - msecs_to_jiffies(HCI_INIT_TIMEOUT)); + msecs_to_jiffies(HCI_INIT_TIMEOUT)); break; case HCISETENCRYPT: @@ -925,23 +936,23 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) if (!test_bit(HCI_AUTH, &hdev->flags)) { /* Auth must be enabled first */ err = hci_request(hdev, hci_auth_req, dr.dev_opt, - msecs_to_jiffies(HCI_INIT_TIMEOUT)); + msecs_to_jiffies(HCI_INIT_TIMEOUT)); if (err) break; } err = hci_request(hdev, hci_encrypt_req, dr.dev_opt, - msecs_to_jiffies(HCI_INIT_TIMEOUT)); + msecs_to_jiffies(HCI_INIT_TIMEOUT)); break; case HCISETSCAN: err = hci_request(hdev, hci_scan_req, dr.dev_opt, - msecs_to_jiffies(HCI_INIT_TIMEOUT)); + msecs_to_jiffies(HCI_INIT_TIMEOUT)); break; case HCISETLINKPOL: err = hci_request(hdev, hci_linkpol_req, dr.dev_opt, - msecs_to_jiffies(HCI_INIT_TIMEOUT)); + msecs_to_jiffies(HCI_INIT_TIMEOUT)); break; case HCISETLINKMODE: @@ -1092,7 +1103,7 @@ static void hci_power_on(struct work_struct *work) if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) schedule_delayed_work(&hdev->power_off, - msecs_to_jiffies(AUTO_OFF_TIMEOUT)); + msecs_to_jiffies(AUTO_OFF_TIMEOUT)); if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) mgmt_index_added(hdev); @@ -1101,7 +1112,7 @@ static void hci_power_on(struct work_struct *work) static void hci_power_off(struct work_struct *work) { struct hci_dev *hdev = container_of(work, struct hci_dev, - power_off.work); + power_off.work); BT_DBG("%s", hdev->name); @@ -1182,7 +1193,7 @@ struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) } static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, - u8 key_type, u8 old_key_type) + u8 key_type, u8 old_key_type) { /* Legacy key */ if (key_type < 0x03) @@ -1223,7 +1234,7 @@ struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8]) list_for_each_entry(k, &hdev->long_term_keys, list) { if (k->ediv != ediv || - memcmp(rand, k->rand, sizeof(k->rand))) + memcmp(rand, k->rand, sizeof(k->rand))) continue; return k; @@ -1231,6 +1242,7 @@ struct smp_ltk *hci_find_ltk(struct hci_dev 
*hdev, __le16 ediv, u8 rand[8]) return NULL; } +EXPORT_SYMBOL(hci_find_ltk); struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) @@ -1239,11 +1251,12 @@ struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, list_for_each_entry(k, &hdev->long_term_keys, list) if (addr_type == k->bdaddr_type && - bacmp(bdaddr, &k->bdaddr) == 0) + bacmp(bdaddr, &k->bdaddr) == 0) return k; return NULL; } +EXPORT_SYMBOL(hci_find_ltk_by_addr); int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len) @@ -1270,14 +1283,15 @@ int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key, * combination key for legacy pairing even when there's no * previous key */ if (type == HCI_LK_CHANGED_COMBINATION && - (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) { + (!conn || conn->remote_auth == 0xff) && + old_key_type == 0xff) { type = HCI_LK_COMBINATION; if (conn) conn->key_type = type; } bacpy(&key->bdaddr, bdaddr); - memcpy(key->val, val, HCI_LINK_KEY_SIZE); + memcpy(key->val, val, 16); key->pin_len = pin_len; if (type == HCI_LK_CHANGED_COMBINATION) @@ -1526,7 +1540,6 @@ static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt) memset(&cp, 0, sizeof(cp)); cp.enable = 1; - cp.filter_dup = 1; hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); } @@ -1694,39 +1707,41 @@ EXPORT_SYMBOL(hci_free_dev); /* Register HCI device */ int hci_register_dev(struct hci_dev *hdev) { + struct list_head *head, *p; int id, error; if (!hdev->open || !hdev->close) return -EINVAL; + write_lock(&hci_dev_list_lock); + /* Do not allow HCI_AMP devices to register at index 0, * so the index can be used as the AMP controller ID. */ - switch (hdev->dev_type) { - case HCI_BREDR: - id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL); - break; - case HCI_AMP: - id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL); - break; - default: - return -EINVAL; - } + id = (hdev->dev_type == HCI_BREDR) ? 
0 : 1; + head = &hci_dev_list; - if (id < 0) - return id; + /* Find first available device id */ + list_for_each(p, &hci_dev_list) { + int nid = list_entry(p, struct hci_dev, list)->id; + if (nid > id) + break; + if (nid == id) + id++; + head = p; + } sprintf(hdev->name, "hci%d", id); hdev->id = id; BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); - write_lock(&hci_dev_list_lock); - list_add(&hdev->list, &hci_dev_list); + list_add(&hdev->list, head); + write_unlock(&hci_dev_list_lock); hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND | - WQ_MEM_RECLAIM, 1); + WQ_MEM_RECLAIM, 1); if (!hdev->workqueue) { error = -ENOMEM; goto err; @@ -1737,8 +1752,7 @@ int hci_register_dev(struct hci_dev *hdev) goto err_wqueue; hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, - RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, - hdev); + RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev); if (hdev->rfkill) { if (rfkill_register(hdev->rfkill) < 0) { rfkill_destroy(hdev->rfkill); @@ -1758,7 +1772,6 @@ int hci_register_dev(struct hci_dev *hdev) err_wqueue: destroy_workqueue(hdev->workqueue); err: - ida_simple_remove(&hci_index_ida, hdev->id); write_lock(&hci_dev_list_lock); list_del(&hdev->list); write_unlock(&hci_dev_list_lock); @@ -1770,14 +1783,12 @@ EXPORT_SYMBOL(hci_register_dev); /* Unregister HCI device */ void hci_unregister_dev(struct hci_dev *hdev) { - int i, id; + int i; BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); set_bit(HCI_UNREGISTER, &hdev->dev_flags); - id = hdev->id; - write_lock(&hci_dev_list_lock); list_del(&hdev->list); write_unlock(&hci_dev_list_lock); @@ -1788,7 +1799,7 @@ void hci_unregister_dev(struct hci_dev *hdev) kfree_skb(hdev->reassembly[i]); if (!test_bit(HCI_INIT, &hdev->flags) && - !test_bit(HCI_SETUP, &hdev->dev_flags)) { + !test_bit(HCI_SETUP, &hdev->dev_flags)) { hci_dev_lock(hdev); mgmt_index_removed(hdev); hci_dev_unlock(hdev); @@ -1818,8 +1829,6 @@ void hci_unregister_dev(struct hci_dev *hdev) hci_dev_unlock(hdev); hci_dev_put(hdev); - - ida_simple_remove(&hci_index_ida, id); } EXPORT_SYMBOL(hci_unregister_dev); @@ -1844,7 +1853,7 @@ int hci_recv_frame(struct sk_buff *skb) { struct hci_dev *hdev = (struct hci_dev *) skb->dev; if (!hdev || (!test_bit(HCI_UP, &hdev->flags) - && !test_bit(HCI_INIT, &hdev->flags))) { + && !test_bit(HCI_INIT, &hdev->flags))) { kfree_skb(skb); return -ENXIO; } @@ -1863,7 +1872,7 @@ int hci_recv_frame(struct sk_buff *skb) EXPORT_SYMBOL(hci_recv_frame); static int hci_reassembly(struct hci_dev *hdev, int type, void *data, - int count, __u8 index) + int count, __u8 index) { int len = 0; int hlen = 0; @@ -1872,7 +1881,7 @@ static int hci_reassembly(struct hci_dev *hdev, int type, void *data, struct bt_skb_cb *scb; if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) || - index >= NUM_REASSEMBLY) + index >= NUM_REASSEMBLY) return -EILSEQ; skb = hdev->reassembly[index]; @@ -2014,7 +2023,7 @@ int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count) type = bt_cb(skb)->pkt_type; rem = hci_reassembly(hdev, type, data, count, - STREAM_REASSEMBLY); + STREAM_REASSEMBLY); if (rem < 0) return rem; @@ -2148,7 +2157,7 @@ static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) } static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue, - struct sk_buff *skb, __u16 flags) + struct sk_buff *skb, __u16 flags) { struct hci_dev *hdev = conn->hdev; struct sk_buff *list; @@ -2207,6 +2216,7 @@ void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) 
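/*
 * Editorial note (not part of the patch): the hci_register_dev hunk above
 * drops IDA-based allocation and instead walks hci_dev_list to pick the
 * smallest free index, starting at 1 for AMP controllers so index 0 stays
 * reserved for BR/EDR. A minimal userspace sketch of that smallest-free-id
 * scan, with a sorted array standing in for the kernel list; all names here
 * are invented for illustration.
 */
#include <stdio.h>

/* Return the smallest id >= min_id that does not occur in used[] (sorted ascending). */
static int first_free_id(const int *used, int n, int min_id)
{
	int id = min_id;

	for (int i = 0; i < n; i++) {
		if (used[i] > id)
			break;		/* gap found before used[i] */
		if (used[i] == id)
			id++;		/* id taken, try the next one */
	}
	return id;
}

int main(void)
{
	int used[] = { 0, 1, 2, 4 };

	/* BR/EDR-style allocation (min 0) and AMP-style (min 1) both yield 3 here */
	printf("%d %d\n", first_free_id(used, 4, 0), first_free_id(used, 4, 1));
	return 0;
}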
queue_work(hdev->workqueue, &hdev->tx_work); } +EXPORT_SYMBOL(hci_send_acl); /* Send SCO data */ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) @@ -2229,12 +2239,12 @@ void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) skb_queue_tail(&conn->data_q, skb); queue_work(hdev->workqueue, &hdev->tx_work); } +EXPORT_SYMBOL(hci_send_sco); /* ---- HCI TX task (outgoing data) ---- */ /* HCI Connection scheduler */ -static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, - int *quote) +static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *conn = NULL, *c; @@ -2293,7 +2303,7 @@ static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, return conn; } -static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) +static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; @@ -2306,16 +2316,16 @@ static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) list_for_each_entry_rcu(c, &h->list, list) { if (c->type == type && c->sent) { BT_ERR("%s killing stalled connection %s", - hdev->name, batostr(&c->dst)); - hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM); + hdev->name, batostr(&c->dst)); + hci_acl_disconn(c, 0x13); } } rcu_read_unlock(); } -static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, - int *quote) +static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, + int *quote) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_chan *chan = NULL; @@ -2432,7 +2442,7 @@ static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type) skb->priority = HCI_PRIO_MAX - 1; BT_DBG("chan %p skb %p promoted to %d", chan, skb, - skb->priority); + skb->priority); } if (hci_conn_num(hdev, type) == num) @@ -2449,18 +2459,18 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb) return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len); } -static void __check_timeout(struct hci_dev *hdev, unsigned int cnt) +static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt) { if (!test_bit(HCI_RAW, &hdev->flags)) { /* ACL tx timeout must be longer than maximum * link supervision timeout (40.9 seconds) */ if (!cnt && time_after(jiffies, hdev->acl_last_tx + - msecs_to_jiffies(HCI_ACL_TX_TIMEOUT))) + msecs_to_jiffies(HCI_ACL_TX_TIMEOUT))) hci_link_tx_to(hdev, ACL_LINK); } } -static void hci_sched_acl_pkt(struct hci_dev *hdev) +static inline void hci_sched_acl_pkt(struct hci_dev *hdev) { unsigned int cnt = hdev->acl_cnt; struct hci_chan *chan; @@ -2470,11 +2480,11 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev) __check_timeout(hdev, cnt); while (hdev->acl_cnt && - (chan = hci_chan_sent(hdev, ACL_LINK, "e))) { + (chan = hci_chan_sent(hdev, ACL_LINK, "e))) { u32 priority = (skb_peek(&chan->data_q))->priority; while (quote-- && (skb = skb_peek(&chan->data_q))) { BT_DBG("chan %p skb %p len %d priority %u", chan, skb, - skb->len, skb->priority); + skb->len, skb->priority); /* Stop if priority has changed */ if (skb->priority < priority) @@ -2498,7 +2508,7 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev) hci_prio_recalculate(hdev, ACL_LINK); } -static void hci_sched_acl_blk(struct hci_dev *hdev) +static inline void hci_sched_acl_blk(struct hci_dev *hdev) { unsigned int cnt = hdev->block_cnt; struct hci_chan *chan; @@ -2508,13 +2518,13 @@ static void hci_sched_acl_blk(struct hci_dev *hdev) __check_timeout(hdev, cnt); 
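/*
 * Editorial note (not part of the patch): __get_blocks above charges an ACL
 * frame against the controller's block-based flow control by rounding the
 * payload up to whole data blocks. A hedged userspace sketch of that
 * arithmetic; the block length used in main() is an example value only.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define HCI_ACL_HDR_SIZE	4	/* 2-byte handle/flags + 2-byte length */

static unsigned int acl_blocks(unsigned int frame_len, unsigned int block_len)
{
	/* only the payload counts; the 4-byte ACL header is subtracted first */
	return DIV_ROUND_UP(frame_len - HCI_ACL_HDR_SIZE, block_len);
}

int main(void)
{
	/* a 339-byte frame with 27-byte blocks needs ceil(335 / 27) = 13 blocks */
	printf("%u\n", acl_blocks(339, 27));
	return 0;
}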
while (hdev->block_cnt > 0 && - (chan = hci_chan_sent(hdev, ACL_LINK, "e))) { + (chan = hci_chan_sent(hdev, ACL_LINK, "e))) { u32 priority = (skb_peek(&chan->data_q))->priority; while (quote > 0 && (skb = skb_peek(&chan->data_q))) { int blocks; BT_DBG("chan %p skb %p len %d priority %u", chan, skb, - skb->len, skb->priority); + skb->len, skb->priority); /* Stop if priority has changed */ if (skb->priority < priority) @@ -2527,7 +2537,7 @@ static void hci_sched_acl_blk(struct hci_dev *hdev) return; hci_conn_enter_active_mode(chan->conn, - bt_cb(skb)->force_active); + bt_cb(skb)->force_active); hci_send_frame(skb); hdev->acl_last_tx = jiffies; @@ -2544,7 +2554,7 @@ static void hci_sched_acl_blk(struct hci_dev *hdev) hci_prio_recalculate(hdev, ACL_LINK); } -static void hci_sched_acl(struct hci_dev *hdev) +static inline void hci_sched_acl(struct hci_dev *hdev) { BT_DBG("%s", hdev->name); @@ -2563,7 +2573,7 @@ static void hci_sched_acl(struct hci_dev *hdev) } /* Schedule SCO */ -static void hci_sched_sco(struct hci_dev *hdev) +static inline void hci_sched_sco(struct hci_dev *hdev) { struct hci_conn *conn; struct sk_buff *skb; @@ -2586,7 +2596,7 @@ static void hci_sched_sco(struct hci_dev *hdev) } } -static void hci_sched_esco(struct hci_dev *hdev) +static inline void hci_sched_esco(struct hci_dev *hdev) { struct hci_conn *conn; struct sk_buff *skb; @@ -2597,8 +2607,7 @@ static void hci_sched_esco(struct hci_dev *hdev) if (!hci_conn_num(hdev, ESCO_LINK)) return; - while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, - "e))) { + while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, "e))) { while (quote-- && (skb = skb_dequeue(&conn->data_q))) { BT_DBG("skb %p len %d", skb, skb->len); hci_send_frame(skb); @@ -2610,7 +2619,7 @@ static void hci_sched_esco(struct hci_dev *hdev) } } -static void hci_sched_le(struct hci_dev *hdev) +static inline void hci_sched_le(struct hci_dev *hdev) { struct hci_chan *chan; struct sk_buff *skb; @@ -2625,7 +2634,7 @@ static void hci_sched_le(struct hci_dev *hdev) /* LE tx timeout must be longer than maximum * link supervision timeout (40.9 seconds) */ if (!hdev->le_cnt && hdev->le_pkts && - time_after(jiffies, hdev->le_last_tx + HZ * 45)) + time_after(jiffies, hdev->le_last_tx + HZ * 45)) hci_link_tx_to(hdev, LE_LINK); } @@ -2635,7 +2644,7 @@ static void hci_sched_le(struct hci_dev *hdev) u32 priority = (skb_peek(&chan->data_q))->priority; while (quote-- && (skb = skb_peek(&chan->data_q))) { BT_DBG("chan %p skb %p len %d priority %u", chan, skb, - skb->len, skb->priority); + skb->len, skb->priority); /* Stop if priority has changed */ if (skb->priority < priority) @@ -2667,7 +2676,7 @@ static void hci_tx_work(struct work_struct *work) struct sk_buff *skb; BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt, - hdev->sco_cnt, hdev->le_cnt); + hdev->sco_cnt, hdev->le_cnt); /* Schedule queues and send stuff to HCI driver */ @@ -2687,7 +2696,7 @@ static void hci_tx_work(struct work_struct *work) /* ----- HCI RX task (incoming data processing) ----- */ /* ACL data packet */ -static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_acl_hdr *hdr = (void *) skb->data; struct hci_conn *conn; @@ -2699,8 +2708,7 @@ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) flags = hci_flags(handle); handle = hci_handle(handle); - BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, - handle, flags); + BT_DBG("%s len %d 
handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags); hdev->stat.acl_rx++; @@ -2724,14 +2732,14 @@ static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb) return; } else { BT_ERR("%s ACL packet for unknown connection handle %d", - hdev->name, handle); + hdev->name, handle); } kfree_skb(skb); } /* SCO data packet */ -static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_sco_hdr *hdr = (void *) skb->data; struct hci_conn *conn; @@ -2755,7 +2763,7 @@ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb) return; } else { BT_ERR("%s SCO packet for unknown connection handle %d", - hdev->name, handle); + hdev->name, handle); } kfree_skb(skb); diff --git a/trunk/net/bluetooth/hci_event.c b/trunk/net/bluetooth/hci_event.c index 1ba929c05d0d..4eefb7f65cf6 100644 --- a/trunk/net/bluetooth/hci_event.c +++ b/trunk/net/bluetooth/hci_event.c @@ -24,7 +24,20 @@ /* Bluetooth HCI event handling. */ -#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include #include #include @@ -82,8 +95,7 @@ static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb) hci_conn_check_pending(hdev); } -static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, - struct sk_buff *skb) +static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb) { BT_DBG("%s", hdev->name); } @@ -154,8 +166,7 @@ static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_cc_read_def_link_policy(struct hci_dev *hdev, - struct sk_buff *skb) +static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_def_link_policy *rp = (void *) skb->data; @@ -167,8 +178,7 @@ static void hci_cc_read_def_link_policy(struct hci_dev *hdev, hdev->link_policy = __le16_to_cpu(rp->policy); } -static void hci_cc_write_def_link_policy(struct hci_dev *hdev, - struct sk_buff *skb) +static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); void *sent; @@ -319,7 +329,7 @@ static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb) if (hdev->discov_timeout > 0) { int to = msecs_to_jiffies(hdev->discov_timeout * 1000); queue_delayed_work(hdev->workqueue, &hdev->discov_off, - to); + to); } } else if (old_iscan) mgmt_discoverable(hdev, 0); @@ -348,7 +358,7 @@ static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) memcpy(hdev->dev_class, rp->dev_class, 3); BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name, - hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); + hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); } static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb) @@ -396,8 +406,7 @@ static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); } -static void hci_cc_write_voice_setting(struct hci_dev *hdev, - struct sk_buff *skb) +static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); __u16 setting; @@ -464,7 +473,7 @@ static u8 hci_get_inquiry_mode(struct hci_dev *hdev) return 1; if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 && - hdev->lmp_subver == 0x0757) + hdev->lmp_subver == 0x0757) return 1; if 
(hdev->manufacturer == 15) { @@ -477,7 +486,7 @@ static u8 hci_get_inquiry_mode(struct hci_dev *hdev) } if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 && - hdev->lmp_subver == 0x1805) + hdev->lmp_subver == 0x1805) return 1; return 0; @@ -557,7 +566,7 @@ static void hci_setup(struct hci_dev *hdev) if (hdev->hci_ver > BLUETOOTH_VER_1_1) hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL); - if (lmp_ssp_capable(hdev)) { + if (hdev->features[6] & LMP_SIMPLE_PAIR) { if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { u8 mode = 0x01; hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, @@ -609,7 +618,8 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb) hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name, - hdev->manufacturer, hdev->hci_ver, hdev->hci_rev); + hdev->manufacturer, + hdev->hci_ver, hdev->hci_rev); if (test_bit(HCI_INIT, &hdev->flags)) hci_setup(hdev); @@ -636,8 +646,7 @@ static void hci_setup_link_policy(struct hci_dev *hdev) hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp); } -static void hci_cc_read_local_commands(struct hci_dev *hdev, - struct sk_buff *skb) +static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_local_commands *rp = (void *) skb->data; @@ -655,8 +664,7 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev, hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status); } -static void hci_cc_read_local_features(struct hci_dev *hdev, - struct sk_buff *skb) +static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_rp_read_local_features *rp = (void *) skb->data; @@ -705,10 +713,10 @@ static void hci_cc_read_local_features(struct hci_dev *hdev, hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name, - hdev->features[0], hdev->features[1], - hdev->features[2], hdev->features[3], - hdev->features[4], hdev->features[5], - hdev->features[6], hdev->features[7]); + hdev->features[0], hdev->features[1], + hdev->features[2], hdev->features[3], + hdev->features[4], hdev->features[5], + hdev->features[6], hdev->features[7]); } static void hci_set_le_support(struct hci_dev *hdev) @@ -728,7 +736,7 @@ static void hci_set_le_support(struct hci_dev *hdev) } static void hci_cc_read_local_ext_features(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { struct hci_rp_read_local_ext_features *rp = (void *) skb->data; @@ -754,7 +762,7 @@ static void hci_cc_read_local_ext_features(struct hci_dev *hdev, } static void hci_cc_read_flow_control_mode(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { struct hci_rp_read_flow_control_mode *rp = (void *) skb->data; @@ -790,8 +798,9 @@ static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb) hdev->acl_cnt = hdev->acl_pkts; hdev->sco_cnt = hdev->sco_pkts; - BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu, - hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts); + BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, + hdev->acl_mtu, hdev->acl_pkts, + hdev->sco_mtu, hdev->sco_pkts); } static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) @@ -807,7 +816,7 @@ static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb) } static void hci_cc_read_data_block_size(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { struct hci_rp_read_data_block_size *rp = (void *) skb->data; @@ 
-823,7 +832,7 @@ static void hci_cc_read_data_block_size(struct hci_dev *hdev, hdev->block_cnt = hdev->num_blocks; BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, - hdev->block_cnt, hdev->block_len); + hdev->block_cnt, hdev->block_len); hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status); } @@ -838,7 +847,7 @@ static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb) } static void hci_cc_read_local_amp_info(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { struct hci_rp_read_local_amp_info *rp = (void *) skb->data; @@ -862,7 +871,7 @@ static void hci_cc_read_local_amp_info(struct hci_dev *hdev, } static void hci_cc_delete_stored_link_key(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); @@ -881,7 +890,7 @@ static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb) } static void hci_cc_write_inquiry_mode(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); @@ -891,7 +900,7 @@ static void hci_cc_write_inquiry_mode(struct hci_dev *hdev, } static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data; @@ -950,7 +959,7 @@ static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb) if (test_bit(HCI_MGMT, &hdev->dev_flags)) mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, - rp->status); + rp->status); hci_dev_unlock(hdev); } @@ -991,7 +1000,7 @@ static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb) } static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { struct hci_rp_user_confirm_reply *rp = (void *) skb->data; @@ -1022,7 +1031,7 @@ static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb) } static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { struct hci_rp_user_confirm_reply *rp = (void *) skb->data; @@ -1038,7 +1047,7 @@ static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, } static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { struct hci_rp_read_local_oob_data *rp = (void *) skb->data; @@ -1067,7 +1076,7 @@ static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb) } static void hci_cc_le_set_scan_enable(struct hci_dev *hdev, - struct sk_buff *skb) + struct sk_buff *skb) { struct hci_cp_le_set_scan_enable *cp; __u8 status = *((__u8 *) skb->data); @@ -1147,8 +1156,8 @@ static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb) hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status); } -static void hci_cc_write_le_host_supported(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev, + struct sk_buff *skb) { struct hci_cp_write_le_host_supported *sent; __u8 status = *((__u8 *) skb->data); @@ -1167,13 +1176,13 @@ static void hci_cc_write_le_host_supported(struct hci_dev *hdev, } if (test_bit(HCI_MGMT, &hdev->dev_flags) && - !test_bit(HCI_INIT, &hdev->flags)) + !test_bit(HCI_INIT, &hdev->flags)) mgmt_le_enable_complete(hdev, sent->le, status); hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status); } -static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) +static inline void hci_cs_inquiry(struct hci_dev *hdev, 
__u8 status) { BT_DBG("%s status 0x%x", hdev->name, status); @@ -1194,7 +1203,7 @@ static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) hci_dev_unlock(hdev); } -static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) +static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) { struct hci_cp_create_conn *cp; struct hci_conn *conn; @@ -1324,7 +1333,7 @@ static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) } static int hci_outgoing_auth_needed(struct hci_dev *hdev, - struct hci_conn *conn) + struct hci_conn *conn) { if (conn->state != BT_CONFIG || !conn->out) return 0; @@ -1334,14 +1343,15 @@ static int hci_outgoing_auth_needed(struct hci_dev *hdev, /* Only request authentication for SSP connections or non-SSP * devices with sec_level HIGH or if MITM protection is requested */ - if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) && - conn->pending_sec_level != BT_SECURITY_HIGH) + if (!hci_conn_ssp_enabled(conn) && + conn->pending_sec_level != BT_SECURITY_HIGH && + !(conn->auth_type & 0x01)) return 0; return 1; } -static int hci_resolve_name(struct hci_dev *hdev, +static inline int hci_resolve_name(struct hci_dev *hdev, struct inquiry_entry *e) { struct hci_cp_remote_name_req cp; @@ -1628,7 +1638,7 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status) conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr); BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr), - conn); + conn); if (status) { if (conn && conn->state == BT_CONNECT) { @@ -1658,7 +1668,7 @@ static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) BT_DBG("%s status 0x%x", hdev->name, status); } -static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { __u8 status = *((__u8 *) skb->data); struct discovery_state *discov = &hdev->discovery; @@ -1698,7 +1708,7 @@ static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct inquiry_data data; struct inquiry_info *info = (void *) (skb->data + 1); @@ -1735,7 +1745,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_conn_complete *ev = (void *) skb->data; struct hci_conn *conn; @@ -1813,18 +1823,18 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_conn_check_pending(hdev); } -static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_conn_request *ev = (void *) skb->data; int mask = hdev->link_mode; - BT_DBG("%s bdaddr %s type 0x%x", hdev->name, batostr(&ev->bdaddr), - ev->link_type); + BT_DBG("%s bdaddr %s type 0x%x", hdev->name, + batostr(&ev->bdaddr), ev->link_type); mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type); if ((mask & HCI_LM_ACCEPT) && - !hci_blacklist_lookup(hdev, &ev->bdaddr)) { + !hci_blacklist_lookup(hdev, &ev->bdaddr)) { /* Connection accepted */ struct inquiry_entry *ie; struct hci_conn *conn; @@ -1835,8 +1845,7 @@ static void 
hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) if (ie) memcpy(ie->data.dev_class, ev->dev_class, 3); - conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, - &ev->bdaddr); + conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); if (!conn) { conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr); if (!conn) { @@ -1869,9 +1878,9 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) bacpy(&cp.bdaddr, &ev->bdaddr); cp.pkt_type = cpu_to_le16(conn->pkt_type); - cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40); - cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40); - cp.max_latency = __constant_cpu_to_le16(0xffff); + cp.tx_bandwidth = cpu_to_le32(0x00001f40); + cp.rx_bandwidth = cpu_to_le32(0x00001f40); + cp.max_latency = cpu_to_le16(0xffff); cp.content_format = cpu_to_le16(hdev->voice_setting); cp.retrans_effort = 0xff; @@ -1888,7 +1897,7 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb) } } -static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_disconn_complete *ev = (void *) skb->data; struct hci_conn *conn; @@ -1905,10 +1914,10 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) conn->state = BT_CLOSED; if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) && - (conn->type == ACL_LINK || conn->type == LE_LINK)) { + (conn->type == ACL_LINK || conn->type == LE_LINK)) { if (ev->status != 0) mgmt_disconnect_failed(hdev, &conn->dst, conn->type, - conn->dst_type, ev->status); + conn->dst_type, ev->status); else mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type); @@ -1925,7 +1934,7 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_auth_complete *ev = (void *) skb->data; struct hci_conn *conn; @@ -1940,7 +1949,7 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) if (!ev->status) { if (!hci_conn_ssp_enabled(conn) && - test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { + test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) { BT_INFO("re-auth of legacy device is not possible."); } else { conn->link_mode |= HCI_LM_AUTH; @@ -1960,7 +1969,7 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) cp.handle = ev->handle; cp.encrypt = 0x01; hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), - &cp); + &cp); } else { conn->state = BT_CONNECTED; hci_proto_connect_cfm(conn, ev->status); @@ -1980,7 +1989,7 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) cp.handle = ev->handle; cp.encrypt = 0x01; hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), - &cp); + &cp); } else { clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); hci_encrypt_cfm(conn, ev->status, 0x00); @@ -1991,7 +2000,7 @@ static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_remote_name *ev = (void *) skb->data; struct hci_conn *conn; @@ -2030,7 +2039,7 @@ static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } 
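/*
 * Editorial note (not part of the patch): the sync-connection hunks above, like
 * most of this diff, only swap __constant_cpu_to_le16/32 for the plain
 * cpu_to_le16/32 helpers; the wire values are unchanged (0x00001f40 octets/s
 * is 8000 B/s, i.e. 64 kbit/s voice bandwidth). A userspace sketch of what the
 * little-endian conversion produces, using explicit byte packing instead of
 * the kernel macros.
 */
#include <stdint.h>
#include <stdio.h>

static void put_le32(uint8_t *dst, uint32_t v)
{
	dst[0] = v & 0xff;		/* least significant byte first */
	dst[1] = (v >> 8) & 0xff;
	dst[2] = (v >> 16) & 0xff;
	dst[3] = (v >> 24) & 0xff;
}

int main(void)
{
	uint8_t buf[4];

	put_le32(buf, 0x00001f40);	/* 8000 octets/s, i.e. 64 kbit/s */
	printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
	/* prints: 40 1f 00 00 */
	return 0;
}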
-static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_encrypt_change *ev = (void *) skb->data; struct hci_conn *conn; @@ -2073,8 +2082,7 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_change_link_key_complete_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_change_link_key_complete *ev = (void *) skb->data; struct hci_conn *conn; @@ -2096,8 +2104,7 @@ static void hci_change_link_key_complete_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_remote_features_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_remote_features *ev = (void *) skb->data; struct hci_conn *conn; @@ -2121,7 +2128,7 @@ static void hci_remote_features_evt(struct hci_dev *hdev, cp.handle = ev->handle; cp.page = 0x01; hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, - sizeof(cp), &cp); + sizeof(cp), &cp); goto unlock; } @@ -2146,18 +2153,17 @@ static void hci_remote_features_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb) { BT_DBG("%s", hdev->name); } -static void hci_qos_setup_complete_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { BT_DBG("%s", hdev->name); } -static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_cmd_complete *ev = (void *) skb->data; __u16 opcode; @@ -2378,7 +2384,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) } } -static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_cmd_status *ev = (void *) skb->data; __u16 opcode; @@ -2459,7 +2465,7 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb) } } -static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_role_change *ev = (void *) skb->data; struct hci_conn *conn; @@ -2485,7 +2491,7 @@ static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_num_comp_pkts *ev = (void *) skb->data; int i; @@ -2496,7 +2502,7 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) } if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) + - ev->num_hndl * sizeof(struct hci_comp_pkts_info)) { + ev->num_hndl * sizeof(struct hci_comp_pkts_info)) { BT_DBG("%s bad parameters", hdev->name); return; } @@ -2551,7 +2557,8 @@ static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb) queue_work(hdev->workqueue, &hdev->tx_work); } -static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void 
hci_num_comp_blocks_evt(struct hci_dev *hdev, + struct sk_buff *skb) { struct hci_ev_num_comp_blocks *ev = (void *) skb->data; int i; @@ -2562,13 +2569,13 @@ static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb) } if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) + - ev->num_hndl * sizeof(struct hci_comp_blocks_info)) { + ev->num_hndl * sizeof(struct hci_comp_blocks_info)) { BT_DBG("%s bad parameters", hdev->name); return; } BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks, - ev->num_hndl); + ev->num_hndl); for (i = 0; i < ev->num_hndl; i++) { struct hci_comp_blocks_info *info = &ev->handles[i]; @@ -2600,7 +2607,7 @@ static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb) queue_work(hdev->workqueue, &hdev->tx_work); } -static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_mode_change *ev = (void *) skb->data; struct hci_conn *conn; @@ -2614,8 +2621,7 @@ static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) conn->mode = ev->mode; conn->interval = __le16_to_cpu(ev->interval); - if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, - &conn->flags)) { + if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) { if (conn->mode == HCI_CM_ACTIVE) set_bit(HCI_CONN_POWER_SAVE, &conn->flags); else @@ -2629,7 +2635,7 @@ static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_pin_code_req *ev = (void *) skb->data; struct hci_conn *conn; @@ -2650,7 +2656,7 @@ static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags)) hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, - sizeof(ev->bdaddr), &ev->bdaddr); + sizeof(ev->bdaddr), &ev->bdaddr); else if (test_bit(HCI_MGMT, &hdev->dev_flags)) { u8 secure; @@ -2666,7 +2672,7 @@ static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_link_key_req *ev = (void *) skb->data; struct hci_cp_link_key_reply cp; @@ -2683,15 +2689,15 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) key = hci_find_link_key(hdev, &ev->bdaddr); if (!key) { BT_DBG("%s link key not found for %s", hdev->name, - batostr(&ev->bdaddr)); + batostr(&ev->bdaddr)); goto not_found; } BT_DBG("%s found key type %u for %s", hdev->name, key->type, - batostr(&ev->bdaddr)); + batostr(&ev->bdaddr)); if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) && - key->type == HCI_LK_DEBUG_COMBINATION) { + key->type == HCI_LK_DEBUG_COMBINATION) { BT_DBG("%s ignoring debug key", hdev->name); goto not_found; } @@ -2699,15 +2705,16 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); if (conn) { if (key->type == HCI_LK_UNAUTH_COMBINATION && - conn->auth_type != 0xff && (conn->auth_type & 0x01)) { + conn->auth_type != 0xff && + (conn->auth_type & 0x01)) { BT_DBG("%s ignoring unauthenticated key", hdev->name); goto not_found; } if (key->type == HCI_LK_COMBINATION && key->pin_len 
< 16 && - conn->pending_sec_level == BT_SECURITY_HIGH) { - BT_DBG("%s ignoring key unauthenticated for high security", - hdev->name); + conn->pending_sec_level == BT_SECURITY_HIGH) { + BT_DBG("%s ignoring key unauthenticated for high \ + security", hdev->name); goto not_found; } @@ -2716,7 +2723,7 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) } bacpy(&cp.bdaddr, &ev->bdaddr); - memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE); + memcpy(cp.link_key, key->val, 16); hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); @@ -2729,7 +2736,7 @@ static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_link_key_notify *ev = (void *) skb->data; struct hci_conn *conn; @@ -2753,12 +2760,12 @@ static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb) if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags)) hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key, - ev->key_type, pin_len); + ev->key_type, pin_len); hci_dev_unlock(hdev); } -static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_clock_offset *ev = (void *) skb->data; struct hci_conn *conn; @@ -2781,7 +2788,7 @@ static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_pkt_type_change *ev = (void *) skb->data; struct hci_conn *conn; @@ -2797,7 +2804,7 @@ static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_pscan_rep_mode *ev = (void *) skb->data; struct inquiry_entry *ie; @@ -2815,8 +2822,7 @@ static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct inquiry_data data; int num_rsp = *((__u8 *) skb->data); @@ -2875,8 +2881,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_remote_ext_features_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_remote_ext_features *ev = (void *) skb->data; struct hci_conn *conn; @@ -2924,8 +2929,7 @@ static void hci_remote_ext_features_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_sync_conn_complete_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_sync_conn_complete *ev = (void *) skb->data; struct hci_conn *conn; @@ -2980,20 +2984,19 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void 
hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb) { BT_DBG("%s", hdev->name); } -static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_sniff_subrate *ev = (void *) skb->data; BT_DBG("%s status %d", hdev->name, ev->status); } -static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct inquiry_data data; struct extended_inquiry_info *info = (void *) (skb->data + 1); @@ -3040,51 +3043,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_key_refresh_complete_evt(struct hci_dev *hdev, - struct sk_buff *skb) -{ - struct hci_ev_key_refresh_complete *ev = (void *) skb->data; - struct hci_conn *conn; - - BT_DBG("%s status %u handle %u", hdev->name, ev->status, - __le16_to_cpu(ev->handle)); - - hci_dev_lock(hdev); - - conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); - if (!conn) - goto unlock; - - if (!ev->status) - conn->sec_level = conn->pending_sec_level; - - clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); - - if (ev->status && conn->state == BT_CONNECTED) { - hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE); - hci_conn_put(conn); - goto unlock; - } - - if (conn->state == BT_CONFIG) { - if (!ev->status) - conn->state = BT_CONNECTED; - - hci_proto_connect_cfm(conn, ev->status); - hci_conn_put(conn); - } else { - hci_auth_cfm(conn, ev->status); - - hci_conn_hold(conn); - conn->disc_timeout = HCI_DISCONN_TIMEOUT; - hci_conn_put(conn); - } - -unlock: - hci_dev_unlock(hdev); -} - -static u8 hci_get_auth_req(struct hci_conn *conn) +static inline u8 hci_get_auth_req(struct hci_conn *conn) { /* If remote requests dedicated bonding follow that lead */ if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) { @@ -3103,7 +3062,7 @@ static u8 hci_get_auth_req(struct hci_conn *conn) return conn->auth_type; } -static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_io_capa_request *ev = (void *) skb->data; struct hci_conn *conn; @@ -3122,7 +3081,7 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) goto unlock; if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) || - (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { + (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { struct hci_cp_io_capability_reply cp; bacpy(&cp.bdaddr, &ev->bdaddr); @@ -3133,14 +3092,14 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) conn->auth_type = hci_get_auth_req(conn); cp.authentication = conn->auth_type; - if (hci_find_remote_oob_data(hdev, &conn->dst) && - (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags))) + if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) && + hci_find_remote_oob_data(hdev, &conn->dst)) cp.oob_data = 0x01; else cp.oob_data = 0x00; hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, - sizeof(cp), &cp); + sizeof(cp), &cp); } else { struct hci_cp_io_capability_neg_reply cp; @@ -3148,14 +3107,14 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb) cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, - sizeof(cp), &cp); + sizeof(cp), &cp); } unlock: hci_dev_unlock(hdev); } -static void 
hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_io_capa_reply *ev = (void *) skb->data; struct hci_conn *conn; @@ -3177,8 +3136,8 @@ static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_user_confirm_request_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_user_confirm_request_evt(struct hci_dev *hdev, + struct sk_buff *skb) { struct hci_ev_user_confirm_req *ev = (void *) skb->data; int loc_mitm, rem_mitm, confirm_hint = 0; @@ -3206,13 +3165,13 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev, if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) { BT_DBG("Rejecting request: remote device can't provide MITM"); hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, - sizeof(ev->bdaddr), &ev->bdaddr); + sizeof(ev->bdaddr), &ev->bdaddr); goto unlock; } /* If no side requires MITM protection; auto-accept */ if ((!loc_mitm || conn->remote_cap == 0x03) && - (!rem_mitm || conn->io_capability == 0x03)) { + (!rem_mitm || conn->io_capability == 0x03)) { /* If we're not the initiators request authorization to * proceed from user space (mgmt_user_confirm with @@ -3224,7 +3183,7 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev, } BT_DBG("Auto-accept of user confirmation with %ums delay", - hdev->auto_accept_delay); + hdev->auto_accept_delay); if (hdev->auto_accept_delay > 0) { int delay = msecs_to_jiffies(hdev->auto_accept_delay); @@ -3233,7 +3192,7 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev, } hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, - sizeof(ev->bdaddr), &ev->bdaddr); + sizeof(ev->bdaddr), &ev->bdaddr); goto unlock; } @@ -3245,8 +3204,8 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_user_passkey_request_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_user_passkey_request_evt(struct hci_dev *hdev, + struct sk_buff *skb) { struct hci_ev_user_passkey_req *ev = (void *) skb->data; @@ -3260,8 +3219,7 @@ static void hci_user_passkey_request_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_simple_pair_complete_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_simple_pair_complete *ev = (void *) skb->data; struct hci_conn *conn; @@ -3289,8 +3247,7 @@ static void hci_simple_pair_complete_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_remote_host_features_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_remote_host_features *ev = (void *) skb->data; struct inquiry_entry *ie; @@ -3306,8 +3263,8 @@ static void hci_remote_host_features_evt(struct hci_dev *hdev, hci_dev_unlock(hdev); } -static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, - struct sk_buff *skb) +static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev, + struct sk_buff *skb) { struct hci_ev_remote_oob_data_request *ev = (void *) skb->data; struct oob_data *data; @@ -3328,20 +3285,20 @@ static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer)); hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp), - &cp); + &cp); } else { struct 
hci_cp_remote_oob_data_neg_reply cp; bacpy(&cp.bdaddr, &ev->bdaddr); hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp), - &cp); + &cp); } unlock: hci_dev_unlock(hdev); } -static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_le_conn_complete *ev = (void *) skb->data; struct hci_conn *conn; @@ -3350,19 +3307,6 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_lock(hdev); - if (ev->status) { - conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); - if (!conn) - goto unlock; - - mgmt_connect_failed(hdev, &conn->dst, conn->type, - conn->dst_type, ev->status); - hci_proto_connect_cfm(conn, ev->status); - conn->state = BT_CLOSED; - hci_conn_del(conn); - goto unlock; - } - conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr); if (!conn) { conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr); @@ -3375,6 +3319,15 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) conn->dst_type = ev->bdaddr_type; } + if (ev->status) { + mgmt_connect_failed(hdev, &ev->bdaddr, conn->type, + conn->dst_type, ev->status); + hci_proto_connect_cfm(conn, ev->status); + conn->state = BT_CLOSED; + hci_conn_del(conn); + goto unlock; + } + if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) mgmt_device_connected(hdev, &ev->bdaddr, conn->type, conn->dst_type, 0, NULL, 0, NULL); @@ -3392,7 +3345,8 @@ static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_le_adv_report_evt(struct hci_dev *hdev, + struct sk_buff *skb) { u8 num_reports = skb->data[0]; void *ptr = &skb->data[1]; @@ -3413,7 +3367,8 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_le_ltk_request_evt(struct hci_dev *hdev, + struct sk_buff *skb) { struct hci_ev_le_ltk_req *ev = (void *) skb->data; struct hci_cp_le_ltk_reply cp; @@ -3456,7 +3411,7 @@ static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb) hci_dev_unlock(hdev); } -static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) +static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_le_meta *le_ev = (void *) skb->data; @@ -3604,10 +3559,6 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb) hci_extended_inquiry_result_evt(hdev, skb); break; - case HCI_EV_KEY_REFRESH_COMPLETE: - hci_key_refresh_complete_evt(hdev, skb); - break; - case HCI_EV_IO_CAPA_REQUEST: hci_io_capa_request_evt(hdev, skb); break; diff --git a/trunk/net/bluetooth/hci_sock.c b/trunk/net/bluetooth/hci_sock.c index a7f04de03d79..5914623f426a 100644 --- a/trunk/net/bluetooth/hci_sock.c +++ b/trunk/net/bluetooth/hci_sock.c @@ -24,7 +24,25 @@ /* Bluetooth HCI sockets. */ -#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include #include #include @@ -95,12 +113,11 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) flt = &hci_pi(sk)->filter; if (!test_bit((bt_cb(skb)->pkt_type == HCI_VENDOR_PKT) ? 
- 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), - &flt->type_mask)) + 0 : (bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask)) continue; if (bt_cb(skb)->pkt_type == HCI_EVENT_PKT) { - int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS); + register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS); if (!hci_test_bit(evt, &flt->event_mask)) continue; @@ -223,8 +240,7 @@ void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) struct hci_mon_hdr *hdr; /* Create a private copy with headroom */ - skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, - GFP_ATOMIC); + skb_copy = __pskb_copy(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC); if (!skb_copy) continue; @@ -479,8 +495,7 @@ static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg) } /* Ioctls that require bound socket */ -static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, - unsigned long arg) +static inline int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) { struct hci_dev *hdev = hci_pi(sk)->hdev; @@ -525,8 +540,7 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, } } -static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, - unsigned long arg) +static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; void __user *argp = (void __user *) arg; @@ -587,8 +601,7 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, } } -static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, - int addr_len) +static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) { struct sockaddr_hci haddr; struct sock *sk = sock->sk; @@ -677,8 +690,7 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, return err; } -static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, - int *addr_len, int peer) +static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer) { struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr; struct sock *sk = sock->sk; @@ -699,15 +711,13 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, return 0; } -static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, - struct sk_buff *skb) +static inline void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, struct sk_buff *skb) { __u32 mask = hci_pi(sk)->cmsg_mask; if (mask & HCI_CMSG_DIR) { int incoming = bt_cb(skb)->incoming; - put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), - &incoming); + put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), &incoming); } if (mask & HCI_CMSG_TSTAMP) { @@ -737,7 +747,7 @@ static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, } static int hci_sock_recvmsg(struct kiocb *iocb, struct socket *sock, - struct msghdr *msg, size_t len, int flags) + struct msghdr *msg, size_t len, int flags) { int noblock = flags & MSG_DONTWAIT; struct sock *sk = sock->sk; @@ -847,9 +857,8 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, u16 ocf = hci_opcode_ocf(opcode); if (((ogf > HCI_SFLT_MAX_OGF) || - !hci_test_bit(ocf & HCI_FLT_OCF_BITS, - &hci_sec_filter.ocf_mask[ogf])) && - !capable(CAP_NET_RAW)) { + !hci_test_bit(ocf & HCI_FLT_OCF_BITS, &hci_sec_filter.ocf_mask[ogf])) && + !capable(CAP_NET_RAW)) { err = -EPERM; goto drop; } @@ -882,8 +891,7 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock, goto done; } -static int hci_sock_setsockopt(struct socket *sock, int level, int optname, - char __user *optval, unsigned int len) +static 
int hci_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int len) { struct hci_ufilter uf = { .opcode = 0 }; struct sock *sk = sock->sk; @@ -965,8 +973,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname, return err; } -static int hci_sock_getsockopt(struct socket *sock, int level, int optname, - char __user *optval, int __user *optlen) +static int hci_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct hci_ufilter uf; struct sock *sk = sock->sk; diff --git a/trunk/net/bluetooth/hci_sysfs.c b/trunk/net/bluetooth/hci_sysfs.c index a20e61c3653d..937f3187eafa 100644 --- a/trunk/net/bluetooth/hci_sysfs.c +++ b/trunk/net/bluetooth/hci_sysfs.c @@ -1,6 +1,10 @@ /* Bluetooth HCI driver model support. */ +#include +#include +#include #include +#include #include #include @@ -27,30 +31,27 @@ static inline char *link_typetostr(int type) } } -static ssize_t show_link_type(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_link_type(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_conn *conn = to_hci_conn(dev); return sprintf(buf, "%s\n", link_typetostr(conn->type)); } -static ssize_t show_link_address(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_link_address(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_conn *conn = to_hci_conn(dev); return sprintf(buf, "%s\n", batostr(&conn->dst)); } -static ssize_t show_link_features(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_link_features(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_conn *conn = to_hci_conn(dev); return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", - conn->features[0], conn->features[1], - conn->features[2], conn->features[3], - conn->features[4], conn->features[5], - conn->features[6], conn->features[7]); + conn->features[0], conn->features[1], + conn->features[2], conn->features[3], + conn->features[4], conn->features[5], + conn->features[6], conn->features[7]); } #define LINK_ATTR(_name, _mode, _show, _store) \ @@ -184,22 +185,19 @@ static inline char *host_typetostr(int type) } } -static ssize_t show_bus(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_bus(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_dev *hdev = to_hci_dev(dev); return sprintf(buf, "%s\n", host_bustostr(hdev->bus)); } -static ssize_t show_type(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_type(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_dev *hdev = to_hci_dev(dev); return sprintf(buf, "%s\n", host_typetostr(hdev->dev_type)); } -static ssize_t show_name(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_name(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_dev *hdev = to_hci_dev(dev); char name[HCI_MAX_NAME_LENGTH + 1]; @@ -212,64 +210,55 @@ static ssize_t show_name(struct device *dev, return sprintf(buf, "%s\n", name); } -static ssize_t show_class(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_class(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_dev *hdev = to_hci_dev(dev); - return sprintf(buf, "0x%.2x%.2x%.2x\n", hdev->dev_class[2], - hdev->dev_class[1], hdev->dev_class[0]); + return 
sprintf(buf, "0x%.2x%.2x%.2x\n", + hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]); } -static ssize_t show_address(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_address(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_dev *hdev = to_hci_dev(dev); return sprintf(buf, "%s\n", batostr(&hdev->bdaddr)); } -static ssize_t show_features(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_features(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_dev *hdev = to_hci_dev(dev); return sprintf(buf, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", - hdev->features[0], hdev->features[1], - hdev->features[2], hdev->features[3], - hdev->features[4], hdev->features[5], - hdev->features[6], hdev->features[7]); + hdev->features[0], hdev->features[1], + hdev->features[2], hdev->features[3], + hdev->features[4], hdev->features[5], + hdev->features[6], hdev->features[7]); } -static ssize_t show_manufacturer(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_manufacturer(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_dev *hdev = to_hci_dev(dev); return sprintf(buf, "%d\n", hdev->manufacturer); } -static ssize_t show_hci_version(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_hci_version(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_dev *hdev = to_hci_dev(dev); return sprintf(buf, "%d\n", hdev->hci_ver); } -static ssize_t show_hci_revision(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_hci_revision(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_dev *hdev = to_hci_dev(dev); return sprintf(buf, "%d\n", hdev->hci_rev); } -static ssize_t show_idle_timeout(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_idle_timeout(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_dev *hdev = to_hci_dev(dev); return sprintf(buf, "%d\n", hdev->idle_timeout); } -static ssize_t store_idle_timeout(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t store_idle_timeout(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hci_dev *hdev = to_hci_dev(dev); unsigned int val; @@ -287,16 +276,13 @@ static ssize_t store_idle_timeout(struct device *dev, return count; } -static ssize_t show_sniff_max_interval(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_sniff_max_interval(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_dev *hdev = to_hci_dev(dev); return sprintf(buf, "%d\n", hdev->sniff_max_interval); } -static ssize_t store_sniff_max_interval(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t store_sniff_max_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hci_dev *hdev = to_hci_dev(dev); u16 val; @@ -314,16 +300,13 @@ static ssize_t store_sniff_max_interval(struct device *dev, return count; } -static ssize_t show_sniff_min_interval(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t show_sniff_min_interval(struct device *dev, struct device_attribute *attr, char *buf) { struct hci_dev *hdev = to_hci_dev(dev); return sprintf(buf, "%d\n", hdev->sniff_min_interval); } -static ssize_t 
store_sniff_min_interval(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t store_sniff_min_interval(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hci_dev *hdev = to_hci_dev(dev); u16 val; @@ -352,11 +335,11 @@ static DEVICE_ATTR(hci_version, S_IRUGO, show_hci_version, NULL); static DEVICE_ATTR(hci_revision, S_IRUGO, show_hci_revision, NULL); static DEVICE_ATTR(idle_timeout, S_IRUGO | S_IWUSR, - show_idle_timeout, store_idle_timeout); + show_idle_timeout, store_idle_timeout); static DEVICE_ATTR(sniff_max_interval, S_IRUGO | S_IWUSR, - show_sniff_max_interval, store_sniff_max_interval); + show_sniff_max_interval, store_sniff_max_interval); static DEVICE_ATTR(sniff_min_interval, S_IRUGO | S_IWUSR, - show_sniff_min_interval, store_sniff_min_interval); + show_sniff_min_interval, store_sniff_min_interval); static struct attribute *bt_host_attrs[] = { &dev_attr_bus.attr, @@ -472,8 +455,8 @@ static void print_bt_uuid(struct seq_file *f, u8 *uuid) memcpy(&data5, &uuid[14], 2); seq_printf(f, "%.8x-%.4x-%.4x-%.4x-%.8x%.4x\n", - ntohl(data0), ntohs(data1), ntohs(data2), ntohs(data3), - ntohl(data4), ntohs(data5)); + ntohl(data0), ntohs(data1), ntohs(data2), + ntohs(data3), ntohl(data4), ntohs(data5)); } static int uuids_show(struct seq_file *f, void *p) @@ -530,7 +513,7 @@ static int auto_accept_delay_get(void *data, u64 *val) } DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, - auto_accept_delay_set, "%llu\n"); + auto_accept_delay_set, "%llu\n"); void hci_init_sysfs(struct hci_dev *hdev) { @@ -564,15 +547,15 @@ int hci_add_sysfs(struct hci_dev *hdev) return 0; debugfs_create_file("inquiry_cache", 0444, hdev->debugfs, - hdev, &inquiry_cache_fops); + hdev, &inquiry_cache_fops); debugfs_create_file("blacklist", 0444, hdev->debugfs, - hdev, &blacklist_fops); + hdev, &blacklist_fops); debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops); debugfs_create_file("auto_accept_delay", 0444, hdev->debugfs, hdev, - &auto_accept_delay_fops); + &auto_accept_delay_fops); return 0; } diff --git a/trunk/net/bluetooth/hidp/core.c b/trunk/net/bluetooth/hidp/core.c index ccd985da6518..2c20d765b394 100644 --- a/trunk/net/bluetooth/hidp/core.c +++ b/trunk/net/bluetooth/hidp/core.c @@ -21,8 +21,27 @@ */ #include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include #include +#include + +#include +#include #include #include @@ -225,8 +244,7 @@ static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb) } static int __hidp_send_ctrl_message(struct hidp_session *session, - unsigned char hdr, unsigned char *data, - int size) + unsigned char hdr, unsigned char *data, int size) { struct sk_buff *skb; @@ -250,7 +268,7 @@ static int __hidp_send_ctrl_message(struct hidp_session *session, return 0; } -static int hidp_send_ctrl_message(struct hidp_session *session, +static inline int hidp_send_ctrl_message(struct hidp_session *session, unsigned char hdr, unsigned char *data, int size) { int err; @@ -453,7 +471,7 @@ static void hidp_set_timer(struct hidp_session *session) mod_timer(&session->timer, jiffies + HZ * session->idle_to); } -static void hidp_del_timer(struct hidp_session *session) +static inline void hidp_del_timer(struct hidp_session *session) { if (session->idle_to > 0) del_timer(&session->timer); diff --git a/trunk/net/bluetooth/hidp/sock.c b/trunk/net/bluetooth/hidp/sock.c 
index 18b3f6892a36..73a32d705c1f 100644 --- a/trunk/net/bluetooth/hidp/sock.c +++ b/trunk/net/bluetooth/hidp/sock.c @@ -20,8 +20,22 @@ SOFTWARE IS DISCLAIMED. */ -#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include #include "hidp.h" diff --git a/trunk/net/bluetooth/l2cap_core.c b/trunk/net/bluetooth/l2cap_core.c index 4ca88247b7c2..24f144b72a96 100644 --- a/trunk/net/bluetooth/l2cap_core.c +++ b/trunk/net/bluetooth/l2cap_core.c @@ -30,14 +30,32 @@ #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include #include +#include + +#include #include #include #include #include -#include bool disable_ertm; @@ -55,9 +73,6 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data); static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err); -static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, - struct sk_buff_head *skbs, u8 event); - /* ---- L2CAP channels ---- */ static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid) @@ -181,7 +196,7 @@ static void __l2cap_state_change(struct l2cap_chan *chan, int state) state_to_string(state)); chan->state = state; - chan->ops->state_change(chan, state); + chan->ops->state_change(chan->data, state); } static void l2cap_state_change(struct l2cap_chan *chan, int state) @@ -209,37 +224,6 @@ static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err) release_sock(sk); } -static void __set_retrans_timer(struct l2cap_chan *chan) -{ - if (!delayed_work_pending(&chan->monitor_timer) && - chan->retrans_timeout) { - l2cap_set_timer(chan, &chan->retrans_timer, - msecs_to_jiffies(chan->retrans_timeout)); - } -} - -static void __set_monitor_timer(struct l2cap_chan *chan) -{ - __clear_retrans_timer(chan); - if (chan->monitor_timeout) { - l2cap_set_timer(chan, &chan->monitor_timer, - msecs_to_jiffies(chan->monitor_timeout)); - } -} - -static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head, - u16 seq) -{ - struct sk_buff *skb; - - skb_queue_walk(head, skb) { - if (bt_cb(skb)->control.txseq == seq) - return skb; - } - - return NULL; -} - /* ---- L2CAP sequence number lists ---- */ /* For ERTM, ordered lists of sequence numbers must be tracked for @@ -382,7 +366,7 @@ static void l2cap_chan_timeout(struct work_struct *work) l2cap_chan_unlock(chan); - chan->ops->close(chan); + chan->ops->close(chan->data); mutex_unlock(&conn->chan_lock); l2cap_chan_put(chan); @@ -408,9 +392,6 @@ struct l2cap_chan *l2cap_chan_create(void) atomic_set(&chan->refcnt, 1); - /* This flag is cleared in l2cap_chan_ready() */ - set_bit(CONF_NOT_COMPLETE, &chan->conf_state); - BT_DBG("chan %p", chan); return chan; @@ -449,7 +430,7 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) case L2CAP_CHAN_CONN_ORIENTED: if (conn->hcon->type == LE_LINK) { /* LE connection */ - chan->omtu = L2CAP_DEFAULT_MTU; + chan->omtu = L2CAP_LE_DEFAULT_MTU; chan->scid = L2CAP_CID_LE_DATA; chan->dcid = L2CAP_CID_LE_DATA; } else { @@ -466,13 +447,6 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) chan->omtu = L2CAP_DEFAULT_MTU; break; - case L2CAP_CHAN_CONN_FIX_A2MP: - chan->scid = L2CAP_CID_A2MP; - chan->dcid = L2CAP_CID_A2MP; - chan->omtu = L2CAP_A2MP_DEFAULT_MTU; - chan->imtu = L2CAP_A2MP_DEFAULT_MTU; - break; - 
default: /* Raw socket can send/recv signalling messages only */ chan->scid = L2CAP_CID_SIGNALING; @@ -492,16 +466,18 @@ static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) list_add(&chan->list, &conn->chan_l); } -void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) +static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) { mutex_lock(&conn->chan_lock); __l2cap_chan_add(conn, chan); mutex_unlock(&conn->chan_lock); } -void l2cap_chan_del(struct l2cap_chan *chan, int err) +static void l2cap_chan_del(struct l2cap_chan *chan, int err) { + struct sock *sk = chan->sk; struct l2cap_conn *conn = chan->conn; + struct sock *parent = bt_sk(sk)->parent; __clear_chan_timer(chan); @@ -514,22 +490,34 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err) l2cap_chan_put(chan); chan->conn = NULL; - - if (chan->chan_type != L2CAP_CHAN_CONN_FIX_A2MP) - hci_conn_put(conn->hcon); + hci_conn_put(conn->hcon); } - if (chan->ops->teardown) - chan->ops->teardown(chan, err); + lock_sock(sk); + + __l2cap_state_change(chan, BT_CLOSED); + sock_set_flag(sk, SOCK_ZAPPED); + + if (err) + __l2cap_chan_set_err(chan, err); + + if (parent) { + bt_accept_unlink(sk); + parent->sk_data_ready(parent, 0); + } else + sk->sk_state_change(sk); + + release_sock(sk); - if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state)) + if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) && + test_bit(CONF_INPUT_DONE, &chan->conf_state))) return; - switch(chan->mode) { - case L2CAP_MODE_BASIC: - break; + skb_queue_purge(&chan->tx_q); + + if (chan->mode == L2CAP_MODE_ERTM) { + struct srej_list *l, *tmp; - case L2CAP_MODE_ERTM: __clear_retrans_timer(chan); __clear_monitor_timer(chan); __clear_ack_timer(chan); @@ -538,15 +526,30 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err) l2cap_seq_list_free(&chan->srej_list); l2cap_seq_list_free(&chan->retrans_list); + list_for_each_entry_safe(l, tmp, &chan->srej_l, list) { + list_del(&l->list); + kfree(l); + } + } +} - /* fall through */ +static void l2cap_chan_cleanup_listen(struct sock *parent) +{ + struct sock *sk; - case L2CAP_MODE_STREAMING: - skb_queue_purge(&chan->tx_q); - break; - } + BT_DBG("parent %p", parent); + + /* Close not yet accepted channels */ + while ((sk = bt_accept_dequeue(parent, NULL))) { + struct l2cap_chan *chan = l2cap_pi(sk)->chan; - return; + l2cap_chan_lock(chan); + __clear_chan_timer(chan); + l2cap_chan_close(chan, ECONNRESET); + l2cap_chan_unlock(chan); + + chan->ops->close(chan->data); + } } void l2cap_chan_close(struct l2cap_chan *chan, int reason) @@ -559,8 +562,12 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason) switch (chan->state) { case BT_LISTEN: - if (chan->ops->teardown) - chan->ops->teardown(chan, 0); + lock_sock(sk); + l2cap_chan_cleanup_listen(sk); + + __l2cap_state_change(chan, BT_CLOSED); + sock_set_flag(sk, SOCK_ZAPPED); + release_sock(sk); break; case BT_CONNECTED: @@ -588,7 +595,7 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason) rsp.scid = cpu_to_le16(chan->dcid); rsp.dcid = cpu_to_le16(chan->scid); rsp.result = cpu_to_le16(result); - rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); + rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); } @@ -602,8 +609,9 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason) break; default: - if (chan->ops->teardown) - chan->ops->teardown(chan, 0); + lock_sock(sk); + sock_set_flag(sk, SOCK_ZAPPED); + release_sock(sk); break; } } @@ -619,7 +627,7 @@ static 
inline u8 l2cap_get_auth_type(struct l2cap_chan *chan) default: return HCI_AT_NO_BONDING; } - } else if (chan->psm == __constant_cpu_to_le16(L2CAP_PSM_SDP)) { + } else if (chan->psm == cpu_to_le16(0x0001)) { if (chan->sec_level == BT_SECURITY_LOW) chan->sec_level = BT_SECURITY_SDP; @@ -765,11 +773,9 @@ static inline void __unpack_control(struct l2cap_chan *chan, if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { __unpack_extended_control(get_unaligned_le32(skb->data), &bt_cb(skb)->control); - skb_pull(skb, L2CAP_EXT_CTRL_SIZE); } else { __unpack_enhanced_control(get_unaligned_le16(skb->data), &bt_cb(skb)->control); - skb_pull(skb, L2CAP_ENH_CTRL_SIZE); } } @@ -824,102 +830,66 @@ static inline void __pack_control(struct l2cap_chan *chan, } } -static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan) -{ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - return L2CAP_EXT_HDR_SIZE; - else - return L2CAP_ENH_HDR_SIZE; -} - -static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan, - u32 control) +static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control) { struct sk_buff *skb; struct l2cap_hdr *lh; - int hlen = __ertm_hdr_size(chan); + struct l2cap_conn *conn = chan->conn; + int count, hlen; + + if (chan->state != BT_CONNECTED) + return; + + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + hlen = L2CAP_EXT_HDR_SIZE; + else + hlen = L2CAP_ENH_HDR_SIZE; if (chan->fcs == L2CAP_FCS_CRC16) hlen += L2CAP_FCS_SIZE; - skb = bt_skb_alloc(hlen, GFP_KERNEL); + BT_DBG("chan %p, control 0x%8.8x", chan, control); + + count = min_t(unsigned int, conn->mtu, hlen); + + control |= __set_sframe(chan); + + if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) + control |= __set_ctrl_final(chan); + if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state)) + control |= __set_ctrl_poll(chan); + + skb = bt_skb_alloc(count, GFP_ATOMIC); if (!skb) - return ERR_PTR(-ENOMEM); + return; lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE); lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); lh->cid = cpu_to_le16(chan->dcid); - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE)); - else - put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE)); + __put_control(chan, control, skb_put(skb, __ctrl_size(chan))); if (chan->fcs == L2CAP_FCS_CRC16) { - u16 fcs = crc16(0, (u8 *)skb->data, skb->len); + u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE); put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); } skb->priority = HCI_PRIO_MAX; - return skb; + l2cap_do_send(chan, skb); } -static void l2cap_send_sframe(struct l2cap_chan *chan, - struct l2cap_ctrl *control) +static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control) { - struct sk_buff *skb; - u32 control_field; - - BT_DBG("chan %p, control %p", chan, control); - - if (!control->sframe) - return; - - if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) && - !control->poll) - control->final = 1; - - if (control->super == L2CAP_SUPER_RR) - clear_bit(CONN_RNR_SENT, &chan->conn_state); - else if (control->super == L2CAP_SUPER_RNR) + if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { + control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); set_bit(CONN_RNR_SENT, &chan->conn_state); + } else + control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); - if (control->super != L2CAP_SUPER_SREJ) { - chan->last_acked_seq = control->reqseq; - __clear_ack_timer(chan); - } - - BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq, - control->final, control->poll, 
control->super); - - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - control_field = __pack_extended_control(control); - else - control_field = __pack_enhanced_control(control); - - skb = l2cap_create_sframe_pdu(chan, control_field); - if (!IS_ERR(skb)) - l2cap_do_send(chan, skb); -} - -static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll) -{ - struct l2cap_ctrl control; - - BT_DBG("chan %p, poll %d", chan, poll); - - memset(&control, 0, sizeof(control)); - control.sframe = 1; - control.poll = poll; - - if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) - control.super = L2CAP_SUPER_RNR; - else - control.super = L2CAP_SUPER_RR; + control |= __set_reqseq(chan, chan->buffer_seq); - control.reqseq = chan->buffer_seq; - l2cap_send_sframe(chan, &control); + l2cap_send_sframe(chan, control); } static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) @@ -944,13 +914,25 @@ static void l2cap_send_conn_req(struct l2cap_chan *chan) static void l2cap_chan_ready(struct l2cap_chan *chan) { - /* This clears all conf flags, including CONF_NOT_COMPLETE */ + struct sock *sk = chan->sk; + struct sock *parent; + + lock_sock(sk); + + parent = bt_sk(sk)->parent; + + BT_DBG("sk %p, parent %p", sk, parent); + chan->conf_state = 0; __clear_chan_timer(chan); - chan->state = BT_CONNECTED; + __l2cap_state_change(chan, BT_CONNECTED); + sk->sk_state_change(sk); + + if (parent) + parent->sk_data_ready(parent, 0); - chan->ops->ready(chan); + release_sock(sk); } static void l2cap_do_start(struct l2cap_chan *chan) @@ -971,7 +953,7 @@ static void l2cap_do_start(struct l2cap_chan *chan) l2cap_send_conn_req(chan); } else { struct l2cap_info_req req; - req.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK); + req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; conn->info_ident = l2cap_get_ident(conn); @@ -1013,11 +995,6 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c __clear_ack_timer(chan); } - if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) { - __l2cap_state_change(chan, BT_DISCONN); - return; - } - req.dcid = cpu_to_le16(chan->dcid); req.scid = cpu_to_le16(chan->scid); l2cap_send_cmd(conn, l2cap_get_ident(conn), @@ -1076,20 +1053,20 @@ static void l2cap_conn_start(struct l2cap_conn *conn) if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { struct sock *parent = bt_sk(sk)->parent; - rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND); - rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND); + rsp.result = cpu_to_le16(L2CAP_CR_PEND); + rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND); if (parent) parent->sk_data_ready(parent, 0); } else { __l2cap_state_change(chan, BT_CONFIG); - rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS); - rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); + rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); + rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); } release_sock(sk); } else { - rsp.result = __constant_cpu_to_le16(L2CAP_CR_PEND); - rsp.status = __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND); + rsp.result = cpu_to_le16(L2CAP_CR_PEND); + rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND); } l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, @@ -1173,7 +1150,13 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) lock_sock(parent); - chan = pchan->ops->new_connection(pchan); + /* Check for backlog size */ + if (sk_acceptq_is_full(parent)) { + BT_DBG("backlog full %d", parent->sk_ack_backlog); + goto clean; + } + + chan = pchan->ops->new_connection(pchan->data); if (!chan) goto clean; @@ -1188,7 
+1171,10 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) l2cap_chan_add(conn, chan); - l2cap_chan_ready(chan); + __set_chan_timer(chan, sk->sk_sndtimeo); + + __l2cap_state_change(chan, BT_CONNECTED); + parent->sk_data_ready(parent, 0); clean: release_sock(parent); @@ -1212,11 +1198,6 @@ static void l2cap_conn_ready(struct l2cap_conn *conn) l2cap_chan_lock(chan); - if (chan->chan_type == L2CAP_CHAN_CONN_FIX_A2MP) { - l2cap_chan_unlock(chan); - continue; - } - if (conn->hcon->type == LE_LINK) { if (smp_conn_security(conn, chan->sec_level)) l2cap_chan_ready(chan); @@ -1289,7 +1270,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) l2cap_chan_unlock(chan); - chan->ops->close(chan); + chan->ops->close(chan->data); l2cap_chan_put(chan); } @@ -1314,12 +1295,7 @@ static void security_timeout(struct work_struct *work) struct l2cap_conn *conn = container_of(work, struct l2cap_conn, security_timer.work); - BT_DBG("conn %p", conn); - - if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) { - smp_chan_destroy(conn); - l2cap_conn_del(conn->hcon, ETIMEDOUT); - } + l2cap_conn_del(conn->hcon, ETIMEDOUT); } static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status) @@ -1463,17 +1439,21 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, goto done; } - switch (chan->state) { + lock_sock(sk); + + switch (sk->sk_state) { case BT_CONNECT: case BT_CONNECT2: case BT_CONFIG: /* Already connecting */ err = 0; + release_sock(sk); goto done; case BT_CONNECTED: /* Already connected */ err = -EISCONN; + release_sock(sk); goto done; case BT_OPEN: @@ -1483,12 +1463,13 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, default: err = -EBADFD; + release_sock(sk); goto done; } /* Set destination address and psm */ - lock_sock(sk); bacpy(&bt_sk(sk)->dst, dst); + release_sock(sk); chan->psm = psm; @@ -1590,20 +1571,23 @@ int __l2cap_wait_ack(struct sock *sk) static void l2cap_monitor_timeout(struct work_struct *work) { struct l2cap_chan *chan = container_of(work, struct l2cap_chan, - monitor_timer.work); + monitor_timer.work); BT_DBG("chan %p", chan); l2cap_chan_lock(chan); - if (!chan->conn) { + if (chan->retry_count >= chan->remote_max_tx) { + l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); l2cap_chan_unlock(chan); l2cap_chan_put(chan); return; } - l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO); + chan->retry_count++; + __set_monitor_timer(chan); + l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); l2cap_chan_unlock(chan); l2cap_chan_put(chan); } @@ -1611,293 +1595,234 @@ static void l2cap_monitor_timeout(struct work_struct *work) static void l2cap_retrans_timeout(struct work_struct *work) { struct l2cap_chan *chan = container_of(work, struct l2cap_chan, - retrans_timer.work); + retrans_timer.work); BT_DBG("chan %p", chan); l2cap_chan_lock(chan); - if (!chan->conn) { - l2cap_chan_unlock(chan); - l2cap_chan_put(chan); - return; - } + chan->retry_count = 1; + __set_monitor_timer(chan); + + set_bit(CONN_WAIT_F, &chan->conn_state); + + l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); - l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO); l2cap_chan_unlock(chan); l2cap_chan_put(chan); } -static void l2cap_streaming_send(struct l2cap_chan *chan, - struct sk_buff_head *skbs) +static void l2cap_drop_acked_frames(struct l2cap_chan *chan) { struct sk_buff *skb; - struct l2cap_ctrl *control; - - BT_DBG("chan %p, skbs %p", chan, skbs); - skb_queue_splice_tail_init(skbs, &chan->tx_q); - - while (!skb_queue_empty(&chan->tx_q)) { + while ((skb = 
skb_peek(&chan->tx_q)) && + chan->unacked_frames) { + if (bt_cb(skb)->control.txseq == chan->expected_ack_seq) + break; skb = skb_dequeue(&chan->tx_q); + kfree_skb(skb); + + chan->unacked_frames--; + } - bt_cb(skb)->control.retries = 1; - control = &bt_cb(skb)->control; + if (!chan->unacked_frames) + __clear_retrans_timer(chan); +} - control->reqseq = 0; - control->txseq = chan->next_tx_seq; +static void l2cap_streaming_send(struct l2cap_chan *chan) +{ + struct sk_buff *skb; + u32 control; + u16 fcs; - __pack_control(chan, control, skb); + while ((skb = skb_dequeue(&chan->tx_q))) { + control = __get_control(chan, skb->data + L2CAP_HDR_SIZE); + control |= __set_txseq(chan, chan->next_tx_seq); + control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar); + __put_control(chan, control, skb->data + L2CAP_HDR_SIZE); if (chan->fcs == L2CAP_FCS_CRC16) { - u16 fcs = crc16(0, (u8 *) skb->data, skb->len); - put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); + fcs = crc16(0, (u8 *)skb->data, + skb->len - L2CAP_FCS_SIZE); + put_unaligned_le16(fcs, + skb->data + skb->len - L2CAP_FCS_SIZE); } l2cap_do_send(chan, skb); - BT_DBG("Sent txseq %d", (int)control->txseq); - chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); - chan->frames_sent++; } } -static int l2cap_ertm_send(struct l2cap_chan *chan) +static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq) { struct sk_buff *skb, *tx_skb; - struct l2cap_ctrl *control; - int sent = 0; - - BT_DBG("chan %p", chan); - - if (chan->state != BT_CONNECTED) - return -ENOTCONN; - - if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) - return 0; - - while (chan->tx_send_head && - chan->unacked_frames < chan->remote_tx_win && - chan->tx_state == L2CAP_TX_STATE_XMIT) { - - skb = chan->tx_send_head; - - bt_cb(skb)->control.retries = 1; - control = &bt_cb(skb)->control; + u16 fcs; + u32 control; - if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) - control->final = 1; + skb = skb_peek(&chan->tx_q); + if (!skb) + return; - control->reqseq = chan->buffer_seq; - chan->last_acked_seq = chan->buffer_seq; - control->txseq = chan->next_tx_seq; + while (bt_cb(skb)->control.txseq != tx_seq) { + if (skb_queue_is_last(&chan->tx_q, skb)) + return; - __pack_control(chan, control, skb); + skb = skb_queue_next(&chan->tx_q, skb); + } - if (chan->fcs == L2CAP_FCS_CRC16) { - u16 fcs = crc16(0, (u8 *) skb->data, skb->len); - put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); - } + if (bt_cb(skb)->control.retries == chan->remote_max_tx && + chan->remote_max_tx) { + l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); + return; + } - /* Clone after data has been modified. Data is assumed to be - read-only (for locking purposes) on cloned sk_buffs. 
- */ - tx_skb = skb_clone(skb, GFP_KERNEL); + tx_skb = skb_clone(skb, GFP_ATOMIC); + bt_cb(skb)->control.retries++; - if (!tx_skb) - break; + control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); + control &= __get_sar_mask(chan); - __set_retrans_timer(chan); + if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) + control |= __set_ctrl_final(chan); - chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); - chan->unacked_frames++; - chan->frames_sent++; - sent++; + control |= __set_reqseq(chan, chan->buffer_seq); + control |= __set_txseq(chan, tx_seq); - if (skb_queue_is_last(&chan->tx_q, skb)) - chan->tx_send_head = NULL; - else - chan->tx_send_head = skb_queue_next(&chan->tx_q, skb); + __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); - l2cap_do_send(chan, tx_skb); - BT_DBG("Sent txseq %d", (int)control->txseq); + if (chan->fcs == L2CAP_FCS_CRC16) { + fcs = crc16(0, (u8 *)tx_skb->data, + tx_skb->len - L2CAP_FCS_SIZE); + put_unaligned_le16(fcs, + tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE); } - BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent, - (int) chan->unacked_frames, skb_queue_len(&chan->tx_q)); - - return sent; + l2cap_do_send(chan, tx_skb); } -static void l2cap_ertm_resend(struct l2cap_chan *chan) +static int l2cap_ertm_send(struct l2cap_chan *chan) { - struct l2cap_ctrl control; - struct sk_buff *skb; - struct sk_buff *tx_skb; - u16 seq; + struct sk_buff *skb, *tx_skb; + u16 fcs; + u32 control; + int nsent = 0; - BT_DBG("chan %p", chan); + if (chan->state != BT_CONNECTED) + return -ENOTCONN; if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) - return; + return 0; - while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) { - seq = l2cap_seq_list_pop(&chan->retrans_list); + while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) { - skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq); - if (!skb) { - BT_DBG("Error: Can't retransmit seq %d, frame missing", - seq); - continue; + if (bt_cb(skb)->control.retries == chan->remote_max_tx && + chan->remote_max_tx) { + l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); + break; } + tx_skb = skb_clone(skb, GFP_ATOMIC); + bt_cb(skb)->control.retries++; - control = bt_cb(skb)->control; - if (chan->max_tx != 0 && - bt_cb(skb)->control.retries > chan->max_tx) { - BT_DBG("Retry limit exceeded (%d)", chan->max_tx); - l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); - l2cap_seq_list_clear(&chan->retrans_list); - break; - } + control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE); + control &= __get_sar_mask(chan); - control.reqseq = chan->buffer_seq; if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) - control.final = 1; - else - control.final = 0; - - if (skb_cloned(skb)) { - /* Cloned sk_buffs are read-only, so we need a - * writeable copy - */ - tx_skb = skb_copy(skb, GFP_ATOMIC); - } else { - tx_skb = skb_clone(skb, GFP_ATOMIC); - } + control |= __set_ctrl_final(chan); - if (!tx_skb) { - l2cap_seq_list_clear(&chan->retrans_list); - break; - } + control |= __set_reqseq(chan, chan->buffer_seq); + control |= __set_txseq(chan, chan->next_tx_seq); + control |= __set_ctrl_sar(chan, bt_cb(skb)->control.sar); - /* Update skb contents */ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { - put_unaligned_le32(__pack_extended_control(&control), - tx_skb->data + L2CAP_HDR_SIZE); - } else { - put_unaligned_le16(__pack_enhanced_control(&control), - tx_skb->data + L2CAP_HDR_SIZE); - } + __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE); if (chan->fcs == L2CAP_FCS_CRC16) { - u16 fcs = 
crc16(0, (u8 *) tx_skb->data, tx_skb->len); - put_unaligned_le16(fcs, skb_put(tx_skb, - L2CAP_FCS_SIZE)); + fcs = crc16(0, (u8 *)skb->data, + tx_skb->len - L2CAP_FCS_SIZE); + put_unaligned_le16(fcs, skb->data + + tx_skb->len - L2CAP_FCS_SIZE); } l2cap_do_send(chan, tx_skb); - BT_DBG("Resent txseq %d", control.txseq); + __set_retrans_timer(chan); + + bt_cb(skb)->control.txseq = chan->next_tx_seq; + + chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); + + if (bt_cb(skb)->control.retries == 1) { + chan->unacked_frames++; + + if (!nsent++) + __clear_ack_timer(chan); + } + + chan->frames_sent++; - chan->last_acked_seq = chan->buffer_seq; + if (skb_queue_is_last(&chan->tx_q, skb)) + chan->tx_send_head = NULL; + else + chan->tx_send_head = skb_queue_next(&chan->tx_q, skb); } + + return nsent; } -static void l2cap_retransmit(struct l2cap_chan *chan, - struct l2cap_ctrl *control) +static int l2cap_retransmit_frames(struct l2cap_chan *chan) { - BT_DBG("chan %p, control %p", chan, control); + int ret; - l2cap_seq_list_append(&chan->retrans_list, control->reqseq); - l2cap_ertm_resend(chan); + if (!skb_queue_empty(&chan->tx_q)) + chan->tx_send_head = chan->tx_q.next; + + chan->next_tx_seq = chan->expected_ack_seq; + ret = l2cap_ertm_send(chan); + return ret; } -static void l2cap_retransmit_all(struct l2cap_chan *chan, - struct l2cap_ctrl *control) +static void __l2cap_send_ack(struct l2cap_chan *chan) { - struct sk_buff *skb; - - BT_DBG("chan %p, control %p", chan, control); - - if (control->poll) - set_bit(CONN_SEND_FBIT, &chan->conn_state); + u32 control = 0; - l2cap_seq_list_clear(&chan->retrans_list); + control |= __set_reqseq(chan, chan->buffer_seq); - if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) + if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { + control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); + set_bit(CONN_RNR_SENT, &chan->conn_state); + l2cap_send_sframe(chan, control); return; + } - if (chan->unacked_frames) { - skb_queue_walk(&chan->tx_q, skb) { - if (bt_cb(skb)->control.txseq == control->reqseq || - skb == chan->tx_send_head) - break; - } - - skb_queue_walk_from(&chan->tx_q, skb) { - if (skb == chan->tx_send_head) - break; - - l2cap_seq_list_append(&chan->retrans_list, - bt_cb(skb)->control.txseq); - } + if (l2cap_ertm_send(chan) > 0) + return; - l2cap_ertm_resend(chan); - } + control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); + l2cap_send_sframe(chan, control); } static void l2cap_send_ack(struct l2cap_chan *chan) { - struct l2cap_ctrl control; - u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq, - chan->last_acked_seq); - int threshold; - - BT_DBG("chan %p last_acked_seq %d buffer_seq %d", - chan, chan->last_acked_seq, chan->buffer_seq); + __clear_ack_timer(chan); + __l2cap_send_ack(chan); +} - memset(&control, 0, sizeof(control)); - control.sframe = 1; +static void l2cap_send_srejtail(struct l2cap_chan *chan) +{ + struct srej_list *tail; + u32 control; - if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && - chan->rx_state == L2CAP_RX_STATE_RECV) { - __clear_ack_timer(chan); - control.super = L2CAP_SUPER_RNR; - control.reqseq = chan->buffer_seq; - l2cap_send_sframe(chan, &control); - } else { - if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) { - l2cap_ertm_send(chan); - /* If any i-frames were sent, they included an ack */ - if (chan->buffer_seq == chan->last_acked_seq) - frames_to_ack = 0; - } + control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); + control |= __set_ctrl_final(chan); - /* Ack now if the tx window is 3/4ths full. 
- * Calculate without mul or div - */ - threshold = chan->tx_win; - threshold += threshold << 1; - threshold >>= 2; - - BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack, - threshold); - - if (frames_to_ack >= threshold) { - __clear_ack_timer(chan); - control.super = L2CAP_SUPER_RR; - control.reqseq = chan->buffer_seq; - l2cap_send_sframe(chan, &control); - frames_to_ack = 0; - } + tail = list_entry((&chan->srej_l)->prev, struct srej_list, list); + control |= __set_reqseq(chan, tail->tx_seq); - if (frames_to_ack) - __set_ack_timer(chan); - } + l2cap_send_sframe(chan, control); } static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, @@ -2026,7 +1951,10 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, if (!conn) return ERR_PTR(-ENOTCONN); - hlen = __ertm_hdr_size(chan); + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + hlen = L2CAP_EXT_HDR_SIZE; + else + hlen = L2CAP_ENH_HDR_SIZE; if (sdulen) hlen += L2CAP_SDULEN_SIZE; @@ -2046,11 +1974,7 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, lh->cid = cpu_to_le16(chan->dcid); lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); - /* Control header is populated later */ - if (test_bit(FLAG_EXT_CTRL, &chan->flags)) - put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE)); - else - put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE)); + __put_control(chan, 0, skb_put(skb, __ctrl_size(chan))); if (sdulen) put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); @@ -2061,8 +1985,10 @@ static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, return ERR_PTR(err); } - bt_cb(skb)->control.fcs = chan->fcs; - bt_cb(skb)->control.retries = 0; + if (chan->fcs == L2CAP_FCS_CRC16) + put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE)); + + bt_cb(skb)->control.retries = 0; return skb; } @@ -2073,6 +1999,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan, struct sk_buff *skb; u16 sdu_len; size_t pdu_len; + int err = 0; u8 sar; BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len); @@ -2088,10 +2015,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan, pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD); /* Adjust for largest possible L2CAP overhead. */ - if (chan->fcs) - pdu_len -= L2CAP_FCS_SIZE; - - pdu_len -= __ertm_hdr_size(chan); + pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE; /* Remote device may have requested smaller PDUs */ pdu_len = min_t(size_t, pdu_len, chan->remote_mps); @@ -2131,7 +2055,7 @@ static int l2cap_segment_sdu(struct l2cap_chan *chan, } } - return 0; + return err; } int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, @@ -2193,12 +2117,17 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, if (err) break; + if (chan->mode == L2CAP_MODE_ERTM && chan->tx_send_head == NULL) + chan->tx_send_head = seg_queue.next; + skb_queue_splice_tail_init(&seg_queue, &chan->tx_q); + if (chan->mode == L2CAP_MODE_ERTM) - l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST); + err = l2cap_ertm_send(chan); else - l2cap_streaming_send(chan, &seg_queue); + l2cap_streaming_send(chan); - err = len; + if (err >= 0) + err = len; /* If the skbs were not queued for sending, they'll still be in * seg_queue and need to be purged. 
@@ -2214,296 +2143,6 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len, return err; } -static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq) -{ - struct l2cap_ctrl control; - u16 seq; - - BT_DBG("chan %p, txseq %d", chan, txseq); - - memset(&control, 0, sizeof(control)); - control.sframe = 1; - control.super = L2CAP_SUPER_SREJ; - - for (seq = chan->expected_tx_seq; seq != txseq; - seq = __next_seq(chan, seq)) { - if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) { - control.reqseq = seq; - l2cap_send_sframe(chan, &control); - l2cap_seq_list_append(&chan->srej_list, seq); - } - } - - chan->expected_tx_seq = __next_seq(chan, txseq); -} - -static void l2cap_send_srej_tail(struct l2cap_chan *chan) -{ - struct l2cap_ctrl control; - - BT_DBG("chan %p", chan); - - if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR) - return; - - memset(&control, 0, sizeof(control)); - control.sframe = 1; - control.super = L2CAP_SUPER_SREJ; - control.reqseq = chan->srej_list.tail; - l2cap_send_sframe(chan, &control); -} - -static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq) -{ - struct l2cap_ctrl control; - u16 initial_head; - u16 seq; - - BT_DBG("chan %p, txseq %d", chan, txseq); - - memset(&control, 0, sizeof(control)); - control.sframe = 1; - control.super = L2CAP_SUPER_SREJ; - - /* Capture initial list head to allow only one pass through the list. */ - initial_head = chan->srej_list.head; - - do { - seq = l2cap_seq_list_pop(&chan->srej_list); - if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR) - break; - - control.reqseq = seq; - l2cap_send_sframe(chan, &control); - l2cap_seq_list_append(&chan->srej_list, seq); - } while (chan->srej_list.head != initial_head); -} - -static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq) -{ - struct sk_buff *acked_skb; - u16 ackseq; - - BT_DBG("chan %p, reqseq %d", chan, reqseq); - - if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq) - return; - - BT_DBG("expected_ack_seq %d, unacked_frames %d", - chan->expected_ack_seq, chan->unacked_frames); - - for (ackseq = chan->expected_ack_seq; ackseq != reqseq; - ackseq = __next_seq(chan, ackseq)) { - - acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq); - if (acked_skb) { - skb_unlink(acked_skb, &chan->tx_q); - kfree_skb(acked_skb); - chan->unacked_frames--; - } - } - - chan->expected_ack_seq = reqseq; - - if (chan->unacked_frames == 0) - __clear_retrans_timer(chan); - - BT_DBG("unacked_frames %d", (int) chan->unacked_frames); -} - -static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan) -{ - BT_DBG("chan %p", chan); - - chan->expected_tx_seq = chan->buffer_seq; - l2cap_seq_list_clear(&chan->srej_list); - skb_queue_purge(&chan->srej_q); - chan->rx_state = L2CAP_RX_STATE_RECV; -} - -static void l2cap_tx_state_xmit(struct l2cap_chan *chan, - struct l2cap_ctrl *control, - struct sk_buff_head *skbs, u8 event) -{ - BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, - event); - - switch (event) { - case L2CAP_EV_DATA_REQUEST: - if (chan->tx_send_head == NULL) - chan->tx_send_head = skb_peek(skbs); - - skb_queue_splice_tail_init(skbs, &chan->tx_q); - l2cap_ertm_send(chan); - break; - case L2CAP_EV_LOCAL_BUSY_DETECTED: - BT_DBG("Enter LOCAL_BUSY"); - set_bit(CONN_LOCAL_BUSY, &chan->conn_state); - - if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { - /* The SREJ_SENT state must be aborted if we are to - * enter the LOCAL_BUSY state. 
- */ - l2cap_abort_rx_srej_sent(chan); - } - - l2cap_send_ack(chan); - - break; - case L2CAP_EV_LOCAL_BUSY_CLEAR: - BT_DBG("Exit LOCAL_BUSY"); - clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); - - if (test_bit(CONN_RNR_SENT, &chan->conn_state)) { - struct l2cap_ctrl local_control; - - memset(&local_control, 0, sizeof(local_control)); - local_control.sframe = 1; - local_control.super = L2CAP_SUPER_RR; - local_control.poll = 1; - local_control.reqseq = chan->buffer_seq; - l2cap_send_sframe(chan, &local_control); - - chan->retry_count = 1; - __set_monitor_timer(chan); - chan->tx_state = L2CAP_TX_STATE_WAIT_F; - } - break; - case L2CAP_EV_RECV_REQSEQ_AND_FBIT: - l2cap_process_reqseq(chan, control->reqseq); - break; - case L2CAP_EV_EXPLICIT_POLL: - l2cap_send_rr_or_rnr(chan, 1); - chan->retry_count = 1; - __set_monitor_timer(chan); - __clear_ack_timer(chan); - chan->tx_state = L2CAP_TX_STATE_WAIT_F; - break; - case L2CAP_EV_RETRANS_TO: - l2cap_send_rr_or_rnr(chan, 1); - chan->retry_count = 1; - __set_monitor_timer(chan); - chan->tx_state = L2CAP_TX_STATE_WAIT_F; - break; - case L2CAP_EV_RECV_FBIT: - /* Nothing to process */ - break; - default: - break; - } -} - -static void l2cap_tx_state_wait_f(struct l2cap_chan *chan, - struct l2cap_ctrl *control, - struct sk_buff_head *skbs, u8 event) -{ - BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, - event); - - switch (event) { - case L2CAP_EV_DATA_REQUEST: - if (chan->tx_send_head == NULL) - chan->tx_send_head = skb_peek(skbs); - /* Queue data, but don't send. */ - skb_queue_splice_tail_init(skbs, &chan->tx_q); - break; - case L2CAP_EV_LOCAL_BUSY_DETECTED: - BT_DBG("Enter LOCAL_BUSY"); - set_bit(CONN_LOCAL_BUSY, &chan->conn_state); - - if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { - /* The SREJ_SENT state must be aborted if we are to - * enter the LOCAL_BUSY state. 
- */ - l2cap_abort_rx_srej_sent(chan); - } - - l2cap_send_ack(chan); - - break; - case L2CAP_EV_LOCAL_BUSY_CLEAR: - BT_DBG("Exit LOCAL_BUSY"); - clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); - - if (test_bit(CONN_RNR_SENT, &chan->conn_state)) { - struct l2cap_ctrl local_control; - memset(&local_control, 0, sizeof(local_control)); - local_control.sframe = 1; - local_control.super = L2CAP_SUPER_RR; - local_control.poll = 1; - local_control.reqseq = chan->buffer_seq; - l2cap_send_sframe(chan, &local_control); - - chan->retry_count = 1; - __set_monitor_timer(chan); - chan->tx_state = L2CAP_TX_STATE_WAIT_F; - } - break; - case L2CAP_EV_RECV_REQSEQ_AND_FBIT: - l2cap_process_reqseq(chan, control->reqseq); - - /* Fall through */ - - case L2CAP_EV_RECV_FBIT: - if (control && control->final) { - __clear_monitor_timer(chan); - if (chan->unacked_frames > 0) - __set_retrans_timer(chan); - chan->retry_count = 0; - chan->tx_state = L2CAP_TX_STATE_XMIT; - BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state); - } - break; - case L2CAP_EV_EXPLICIT_POLL: - /* Ignore */ - break; - case L2CAP_EV_MONITOR_TO: - if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) { - l2cap_send_rr_or_rnr(chan, 1); - __set_monitor_timer(chan); - chan->retry_count++; - } else { - l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); - } - break; - default: - break; - } -} - -static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, - struct sk_buff_head *skbs, u8 event) -{ - BT_DBG("chan %p, control %p, skbs %p, event %d, state %d", - chan, control, skbs, event, chan->tx_state); - - switch (chan->tx_state) { - case L2CAP_TX_STATE_XMIT: - l2cap_tx_state_xmit(chan, control, skbs, event); - break; - case L2CAP_TX_STATE_WAIT_F: - l2cap_tx_state_wait_f(chan, control, skbs, event); - break; - default: - /* Ignore event */ - break; - } -} - -static void l2cap_pass_to_tx(struct l2cap_chan *chan, - struct l2cap_ctrl *control) -{ - BT_DBG("chan %p, control %p", chan, control); - l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT); -} - -static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan, - struct l2cap_ctrl *control) -{ - BT_DBG("chan %p, control %p", chan, control); - l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT); -} - /* Copy frame to all raw sockets on that connection */ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) { @@ -2526,7 +2165,7 @@ static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb) if (!nskb) continue; - if (chan->ops->recv(chan, nskb)) + if (chan->ops->recv(chan->data, nskb)) kfree_skb(nskb); } @@ -2556,9 +2195,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen); if (conn->hcon->type == LE_LINK) - lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING); + lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING); else - lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING); + lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING); cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE); cmd->code = code; @@ -2670,8 +2309,8 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan) efs.stype = chan->local_stype; efs.msdu = cpu_to_le16(chan->local_msdu); efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); - efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); - efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO); + efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); + efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO); break; case L2CAP_MODE_STREAMING: @@ 
-2694,24 +2333,20 @@ static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan) static void l2cap_ack_timeout(struct work_struct *work) { struct l2cap_chan *chan = container_of(work, struct l2cap_chan, - ack_timer.work); - u16 frames_to_ack; + ack_timer.work); BT_DBG("chan %p", chan); l2cap_chan_lock(chan); - frames_to_ack = __seq_offset(chan, chan->buffer_seq, - chan->last_acked_seq); - - if (frames_to_ack) - l2cap_send_rr_or_rnr(chan, 0); + __l2cap_send_ack(chan); l2cap_chan_unlock(chan); + l2cap_chan_put(chan); } -int l2cap_ertm_init(struct l2cap_chan *chan) +static inline int l2cap_ertm_init(struct l2cap_chan *chan) { int err; @@ -2720,6 +2355,7 @@ int l2cap_ertm_init(struct l2cap_chan *chan) chan->expected_ack_seq = 0; chan->unacked_frames = 0; chan->buffer_seq = 0; + chan->num_acked = 0; chan->frames_sent = 0; chan->last_acked_seq = 0; chan->sdu = NULL; @@ -2740,15 +2376,12 @@ int l2cap_ertm_init(struct l2cap_chan *chan) skb_queue_head_init(&chan->srej_q); + INIT_LIST_HEAD(&chan->srej_l); err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win); if (err < 0) return err; - err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win); - if (err < 0) - l2cap_seq_list_free(&chan->srej_list); - - return err; + return l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win); } static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask) @@ -2874,7 +2507,6 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) break; case L2CAP_MODE_STREAMING: - l2cap_txwin_setup(chan); rfc.mode = L2CAP_MODE_STREAMING; rfc.txwin_size = 0; rfc.max_transmit = 0; @@ -2905,7 +2537,7 @@ static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data) } req->dcid = cpu_to_le16(chan->dcid); - req->flags = __constant_cpu_to_le16(0); + req->flags = cpu_to_le16(0); return ptr - data; } @@ -3125,7 +2757,7 @@ static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data) } rsp->scid = cpu_to_le16(chan->dcid); rsp->result = cpu_to_le16(result); - rsp->flags = __constant_cpu_to_le16(0); + rsp->flags = cpu_to_le16(0x0000); return ptr - data; } @@ -3224,7 +2856,7 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, voi } req->dcid = cpu_to_le16(chan->dcid); - req->flags = __constant_cpu_to_le16(0); + req->flags = cpu_to_le16(0x0000); return ptr - data; } @@ -3251,8 +2883,8 @@ void __l2cap_connect_rsp_defer(struct l2cap_chan *chan) rsp.scid = cpu_to_le16(chan->dcid); rsp.dcid = cpu_to_le16(chan->scid); - rsp.result = __constant_cpu_to_le16(L2CAP_CR_SUCCESS); - rsp.status = __constant_cpu_to_le16(L2CAP_CS_NO_INFO); + rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); + rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); @@ -3290,8 +2922,8 @@ static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len) * did not send an RFC option. 
*/ rfc.mode = chan->mode; - rfc.retrans_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); - rfc.monitor_timeout = __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); + rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); + rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); rfc.max_pdu_size = cpu_to_le16(chan->imtu); BT_ERR("Expected RFC option was not found, using defaults"); @@ -3354,7 +2986,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd lock_sock(parent); /* Check if the ACL is secure enough (if not SDP) */ - if (psm != __constant_cpu_to_le16(L2CAP_PSM_SDP) && + if (psm != cpu_to_le16(0x0001) && !hci_conn_check_link_mode(conn->hcon)) { conn->disc_reason = HCI_ERROR_AUTH_FAILURE; result = L2CAP_CR_SEC_BLOCK; @@ -3363,16 +2995,25 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd result = L2CAP_CR_NO_MEM; - /* Check if we already have channel with that dcid */ - if (__l2cap_get_chan_by_dcid(conn, scid)) + /* Check for backlog size */ + if (sk_acceptq_is_full(parent)) { + BT_DBG("backlog full %d", parent->sk_ack_backlog); goto response; + } - chan = pchan->ops->new_connection(pchan); + chan = pchan->ops->new_connection(pchan->data); if (!chan) goto response; sk = chan->sk; + /* Check if we already have channel with that dcid */ + if (__l2cap_get_chan_by_dcid(conn, scid)) { + sock_set_flag(sk, SOCK_ZAPPED); + chan->ops->close(chan->data); + goto response; + } + hci_conn_hold(conn->hcon); bacpy(&bt_sk(sk)->src, conn->src); @@ -3426,7 +3067,7 @@ static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hd if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { struct l2cap_info_req info; - info.type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK); + info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; conn->info_ident = l2cap_get_ident(conn); @@ -3548,7 +3189,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) { struct l2cap_cmd_rej_cid rej; - rej.reason = __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID); + rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID); rej.scid = cpu_to_le16(chan->scid); rej.dcid = cpu_to_le16(chan->dcid); @@ -3570,11 +3211,11 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr memcpy(chan->conf_req + chan->conf_len, req->data, len); chan->conf_len += len; - if (flags & L2CAP_CONF_FLAG_CONTINUATION) { + if (flags & 0x0001) { /* Incomplete config. Send empty response. 
*/ l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(chan, rsp, - L2CAP_CONF_SUCCESS, flags), rsp); + L2CAP_CONF_SUCCESS, 0x0001), rsp); goto unlock; } @@ -3597,6 +3238,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) { set_default_fcs(chan); + l2cap_state_change(chan, BT_CONNECTED); + if (chan->mode == L2CAP_MODE_ERTM || chan->mode == L2CAP_MODE_STREAMING) err = l2cap_ertm_init(chan); @@ -3628,7 +3271,7 @@ static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, l2cap_build_conf_rsp(chan, rsp, - L2CAP_CONF_SUCCESS, flags), rsp); + L2CAP_CONF_SUCCESS, 0x0000), rsp); } unlock: @@ -3719,7 +3362,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr goto done; } - if (flags & L2CAP_CONF_FLAG_CONTINUATION) + if (flags & 0x01) goto done; set_bit(CONF_INPUT_DONE, &chan->conf_state); @@ -3727,6 +3370,7 @@ static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) { set_default_fcs(chan); + l2cap_state_change(chan, BT_CONNECTED); if (chan->mode == L2CAP_MODE_ERTM || chan->mode == L2CAP_MODE_STREAMING) err = l2cap_ertm_init(chan); @@ -3780,7 +3424,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd l2cap_chan_unlock(chan); - chan->ops->close(chan); + chan->ops->close(chan->data); l2cap_chan_put(chan); mutex_unlock(&conn->chan_lock); @@ -3814,7 +3458,7 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd l2cap_chan_unlock(chan); - chan->ops->close(chan); + chan->ops->close(chan->data); l2cap_chan_put(chan); mutex_unlock(&conn->chan_lock); @@ -3835,8 +3479,8 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm u8 buf[8]; u32 feat_mask = l2cap_feat_mask; struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; - rsp->type = __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK); - rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS); + rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); + rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); if (!disable_ertm) feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING | L2CAP_FEAT_FCS; @@ -3856,15 +3500,15 @@ static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cm else l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP; - rsp->type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN); - rsp->result = __constant_cpu_to_le16(L2CAP_IR_SUCCESS); + rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); + rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan)); l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf), buf); } else { struct l2cap_info_rsp rsp; rsp.type = cpu_to_le16(type); - rsp.result = __constant_cpu_to_le16(L2CAP_IR_NOTSUPP); + rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP); l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp); } @@ -3904,7 +3548,7 @@ static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cm if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) { struct l2cap_info_req req; - req.type = __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN); + req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); conn->info_ident = l2cap_get_ident(conn); @@ -4139,9 +3783,9 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, err = l2cap_check_conn_param(min, max, latency, to_multiplier); if (err) - rsp.result = 
__constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); + rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); else - rsp.result = __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED); + rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED); l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP, sizeof(rsp), &rsp); @@ -4289,7 +3933,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn, BT_ERR("Wrong link type (%d)", err); /* FIXME: Map err to a valid reason */ - rej.reason = __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); + rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); } @@ -4321,38 +3965,65 @@ static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb) return 0; } -static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) +static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) { - struct l2cap_ctrl control; + u32 control = 0; - BT_DBG("chan %p", chan); + chan->frames_sent = 0; - memset(&control, 0, sizeof(control)); - control.sframe = 1; - control.final = 1; - control.reqseq = chan->buffer_seq; - set_bit(CONN_SEND_FBIT, &chan->conn_state); + control |= __set_reqseq(chan, chan->buffer_seq); if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { - control.super = L2CAP_SUPER_RNR; - l2cap_send_sframe(chan, &control); + control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR); + l2cap_send_sframe(chan, control); + set_bit(CONN_RNR_SENT, &chan->conn_state); } - if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) && - chan->unacked_frames > 0) - __set_retrans_timer(chan); + if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) + l2cap_retransmit_frames(chan); - /* Send pending iframes */ l2cap_ertm_send(chan); if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && - test_bit(CONN_SEND_FBIT, &chan->conn_state)) { - /* F-bit wasn't sent in an s-frame or i-frame yet, so - * send it now. 
- */ - control.super = L2CAP_SUPER_RR; - l2cap_send_sframe(chan, &control); + chan->frames_sent == 0) { + control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); + l2cap_send_sframe(chan, control); + } +} + +static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar) +{ + struct sk_buff *next_skb; + int tx_seq_offset, next_tx_seq_offset; + + bt_cb(skb)->control.txseq = tx_seq; + bt_cb(skb)->control.sar = sar; + + next_skb = skb_peek(&chan->srej_q); + + tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); + + while (next_skb) { + if (bt_cb(next_skb)->control.txseq == tx_seq) + return -EINVAL; + + next_tx_seq_offset = __seq_offset(chan, + bt_cb(next_skb)->control.txseq, chan->buffer_seq); + + if (next_tx_seq_offset > tx_seq_offset) { + __skb_queue_before(&chan->srej_q, next_skb, skb); + return 0; + } + + if (skb_queue_is_last(&chan->srej_q, next_skb)) + next_skb = NULL; + else + next_skb = skb_queue_next(&chan->srej_q, next_skb); } + + __skb_queue_tail(&chan->srej_q, skb); + + return 0; } static void append_skb_frag(struct sk_buff *skb, @@ -4374,17 +4045,16 @@ static void append_skb_frag(struct sk_buff *skb, skb->truesize += new_frag->truesize; } -static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, - struct l2cap_ctrl *control) +static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control) { int err = -EINVAL; - switch (control->sar) { + switch (__get_ctrl_sar(chan, control)) { case L2CAP_SAR_UNSEGMENTED: if (chan->sdu) break; - err = chan->ops->recv(chan, skb); + err = chan->ops->recv(chan->data, skb); break; case L2CAP_SAR_START: @@ -4434,7 +4104,7 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, if (chan->sdu->len != chan->sdu_len) break; - err = chan->ops->recv(chan, chan->sdu); + err = chan->ops->recv(chan->data, chan->sdu); if (!err) { /* Reassembly complete */ @@ -4456,609 +4126,448 @@ static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, return err; } -void l2cap_chan_busy(struct l2cap_chan *chan, int busy) +static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan) { - u8 event; + BT_DBG("chan %p, Enter local busy", chan); - if (chan->mode != L2CAP_MODE_ERTM) - return; + set_bit(CONN_LOCAL_BUSY, &chan->conn_state); + l2cap_seq_list_clear(&chan->srej_list); - event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR; - l2cap_tx(chan, NULL, NULL, event); + __set_ack_timer(chan); } -static int l2cap_rx_queued_iframes(struct l2cap_chan *chan) +static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan) { - int err = 0; - /* Pass sequential frames to l2cap_reassemble_sdu() - * until a gap is encountered. 
- */ + u32 control; - BT_DBG("chan %p", chan); + if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) + goto done; - while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { - struct sk_buff *skb; - BT_DBG("Searching for skb with txseq %d (queue len %d)", - chan->buffer_seq, skb_queue_len(&chan->srej_q)); + control = __set_reqseq(chan, chan->buffer_seq); + control |= __set_ctrl_poll(chan); + control |= __set_ctrl_super(chan, L2CAP_SUPER_RR); + l2cap_send_sframe(chan, control); + chan->retry_count = 1; - skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq); + __clear_retrans_timer(chan); + __set_monitor_timer(chan); - if (!skb) - break; + set_bit(CONN_WAIT_F, &chan->conn_state); - skb_unlink(skb, &chan->srej_q); - chan->buffer_seq = __next_seq(chan, chan->buffer_seq); - err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control); - if (err) - break; - } +done: + clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); + clear_bit(CONN_RNR_SENT, &chan->conn_state); - if (skb_queue_empty(&chan->srej_q)) { - chan->rx_state = L2CAP_RX_STATE_RECV; - l2cap_send_ack(chan); - } + BT_DBG("chan %p, Exit local busy", chan); +} - return err; +void l2cap_chan_busy(struct l2cap_chan *chan, int busy) +{ + if (chan->mode == L2CAP_MODE_ERTM) { + if (busy) + l2cap_ertm_enter_local_busy(chan); + else + l2cap_ertm_exit_local_busy(chan); + } } -static void l2cap_handle_srej(struct l2cap_chan *chan, - struct l2cap_ctrl *control) +static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq) { struct sk_buff *skb; + u32 control; - BT_DBG("chan %p, control %p", chan, control); + while ((skb = skb_peek(&chan->srej_q)) && + !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { + int err; - if (control->reqseq == chan->next_tx_seq) { - BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq); - l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); - return; - } + if (bt_cb(skb)->control.txseq != tx_seq) + break; - skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq); + skb = skb_dequeue(&chan->srej_q); + control = __set_ctrl_sar(chan, bt_cb(skb)->control.sar); + err = l2cap_reassemble_sdu(chan, skb, control); - if (skb == NULL) { - BT_DBG("Seq %d not available for retransmission", - control->reqseq); - return; - } + if (err < 0) { + l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); + break; + } - if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) { - BT_DBG("Retry limit exceeded (%d)", chan->max_tx); - l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); - return; + chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej); + tx_seq = __next_seq(chan, tx_seq); } +} - clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); - - if (control->poll) { - l2cap_pass_to_tx(chan, control); - - set_bit(CONN_SEND_FBIT, &chan->conn_state); - l2cap_retransmit(chan, control); - l2cap_ertm_send(chan); - - if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) { - set_bit(CONN_SREJ_ACT, &chan->conn_state); - chan->srej_save_reqseq = control->reqseq; - } - } else { - l2cap_pass_to_tx_fbit(chan, control); +static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq) +{ + struct srej_list *l, *tmp; + u32 control; - if (control->final) { - if (chan->srej_save_reqseq != control->reqseq || - !test_and_clear_bit(CONN_SREJ_ACT, - &chan->conn_state)) - l2cap_retransmit(chan, control); - } else { - l2cap_retransmit(chan, control); - if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) { - set_bit(CONN_SREJ_ACT, &chan->conn_state); - chan->srej_save_reqseq = control->reqseq; - } + list_for_each_entry_safe(l, tmp, 
&chan->srej_l, list) { + if (l->tx_seq == tx_seq) { + list_del(&l->list); + kfree(l); + return; } + control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); + control |= __set_reqseq(chan, l->tx_seq); + l2cap_send_sframe(chan, control); + list_del(&l->list); + list_add_tail(&l->list, &chan->srej_l); } } -static void l2cap_handle_rej(struct l2cap_chan *chan, - struct l2cap_ctrl *control) +static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq) { - struct sk_buff *skb; + struct srej_list *new; + u32 control; - BT_DBG("chan %p, control %p", chan, control); + while (tx_seq != chan->expected_tx_seq) { + control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ); + control |= __set_reqseq(chan, chan->expected_tx_seq); + l2cap_seq_list_append(&chan->srej_list, chan->expected_tx_seq); + l2cap_send_sframe(chan, control); - if (control->reqseq == chan->next_tx_seq) { - BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq); - l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); - return; - } + new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC); + if (!new) + return -ENOMEM; - skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq); + new->tx_seq = chan->expected_tx_seq; - if (chan->max_tx && skb && - bt_cb(skb)->control.retries >= chan->max_tx) { - BT_DBG("Retry limit exceeded (%d)", chan->max_tx); - l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); - return; - } + chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); - clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); + list_add_tail(&new->list, &chan->srej_l); + } - l2cap_pass_to_tx(chan, control); + chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); - if (control->final) { - if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) - l2cap_retransmit_all(chan, control); - } else { - l2cap_retransmit_all(chan, control); - l2cap_ertm_send(chan); - if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) - set_bit(CONN_REJ_ACT, &chan->conn_state); - } + return 0; } -static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq) +static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) { - BT_DBG("chan %p, txseq %d", chan, txseq); + u16 tx_seq = __get_txseq(chan, rx_control); + u16 req_seq = __get_reqseq(chan, rx_control); + u8 sar = __get_ctrl_sar(chan, rx_control); + int tx_seq_offset, expected_tx_seq_offset; + int num_to_ack = (chan->tx_win/6) + 1; + int err = 0; - BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq, - chan->expected_tx_seq); + BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len, + tx_seq, rx_control); - if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { - if (__seq_offset(chan, txseq, chan->last_acked_seq) >= - chan->tx_win) { - /* See notes below regarding "double poll" and - * invalid packets. 
- */ - if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) { - BT_DBG("Invalid/Ignore - after SREJ"); - return L2CAP_TXSEQ_INVALID_IGNORE; - } else { - BT_DBG("Invalid - in window after SREJ sent"); - return L2CAP_TXSEQ_INVALID; - } - } + if (__is_ctrl_final(chan, rx_control) && + test_bit(CONN_WAIT_F, &chan->conn_state)) { + __clear_monitor_timer(chan); + if (chan->unacked_frames > 0) + __set_retrans_timer(chan); + clear_bit(CONN_WAIT_F, &chan->conn_state); + } - if (chan->srej_list.head == txseq) { - BT_DBG("Expected SREJ"); - return L2CAP_TXSEQ_EXPECTED_SREJ; - } + chan->expected_ack_seq = req_seq; + l2cap_drop_acked_frames(chan); - if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) { - BT_DBG("Duplicate SREJ - txseq already stored"); - return L2CAP_TXSEQ_DUPLICATE_SREJ; - } + tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq); - if (l2cap_seq_list_contains(&chan->srej_list, txseq)) { - BT_DBG("Unexpected SREJ - not requested"); - return L2CAP_TXSEQ_UNEXPECTED_SREJ; - } + /* invalid tx_seq */ + if (tx_seq_offset >= chan->tx_win) { + l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); + goto drop; } - if (chan->expected_tx_seq == txseq) { - if (__seq_offset(chan, txseq, chan->last_acked_seq) >= - chan->tx_win) { - BT_DBG("Invalid - txseq outside tx window"); - return L2CAP_TXSEQ_INVALID; - } else { - BT_DBG("Expected"); - return L2CAP_TXSEQ_EXPECTED; - } + if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { + if (!test_bit(CONN_RNR_SENT, &chan->conn_state)) + l2cap_send_ack(chan); + goto drop; } - if (__seq_offset(chan, txseq, chan->last_acked_seq) < - __seq_offset(chan, chan->expected_tx_seq, - chan->last_acked_seq)){ - BT_DBG("Duplicate - expected_tx_seq later than txseq"); - return L2CAP_TXSEQ_DUPLICATE; - } - - if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) { - /* A source of invalid packets is a "double poll" condition, - * where delays cause us to send multiple poll packets. If - * the remote stack receives and processes both polls, - * sequence numbers can wrap around in such a way that a - * resent frame has a sequence number that looks like new data - * with a sequence gap. This would trigger an erroneous SREJ - * request. - * - * Fortunately, this is impossible with a tx window that's - * less than half of the maximum sequence number, which allows - * invalid frames to be safely ignored. - * - * With tx window sizes greater than half of the tx window - * maximum, the frame is invalid and cannot be ignored. This - * causes a disconnect. 
- */ + if (tx_seq == chan->expected_tx_seq) + goto expected; - if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) { - BT_DBG("Invalid/Ignore - txseq outside tx window"); - return L2CAP_TXSEQ_INVALID_IGNORE; - } else { - BT_DBG("Invalid - txseq outside tx window"); - return L2CAP_TXSEQ_INVALID; - } - } else { - BT_DBG("Unexpected - txseq indicates missing frames"); - return L2CAP_TXSEQ_UNEXPECTED; - } -} - -static int l2cap_rx_state_recv(struct l2cap_chan *chan, - struct l2cap_ctrl *control, - struct sk_buff *skb, u8 event) -{ - int err = 0; - bool skb_in_use = 0; + if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { + struct srej_list *first; - BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, - event); + first = list_first_entry(&chan->srej_l, + struct srej_list, list); + if (tx_seq == first->tx_seq) { + l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); + l2cap_check_srej_gap(chan, tx_seq); - switch (event) { - case L2CAP_EV_RECV_IFRAME: - switch (l2cap_classify_txseq(chan, control->txseq)) { - case L2CAP_TXSEQ_EXPECTED: - l2cap_pass_to_tx(chan, control); + list_del(&first->list); + kfree(first); - if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { - BT_DBG("Busy, discarding expected seq %d", - control->txseq); - break; + if (list_empty(&chan->srej_l)) { + chan->buffer_seq = chan->buffer_seq_srej; + clear_bit(CONN_SREJ_SENT, &chan->conn_state); + l2cap_send_ack(chan); + BT_DBG("chan %p, Exit SREJ_SENT", chan); } + } else { + struct srej_list *l; - chan->expected_tx_seq = __next_seq(chan, - control->txseq); - - chan->buffer_seq = chan->expected_tx_seq; - skb_in_use = 1; - - err = l2cap_reassemble_sdu(chan, skb, control); - if (err) - break; + /* duplicated tx_seq */ + if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0) + goto drop; - if (control->final) { - if (!test_and_clear_bit(CONN_REJ_ACT, - &chan->conn_state)) { - control->final = 0; - l2cap_retransmit_all(chan, control); - l2cap_ertm_send(chan); + list_for_each_entry(l, &chan->srej_l, list) { + if (l->tx_seq == tx_seq) { + l2cap_resend_srejframe(chan, tx_seq); + return 0; } } - if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) - l2cap_send_ack(chan); - break; - case L2CAP_TXSEQ_UNEXPECTED: - l2cap_pass_to_tx(chan, control); - - /* Can't issue SREJ frames in the local busy state. - * Drop this frame, it will be seen as missing - * when local busy is exited. - */ - if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { - BT_DBG("Busy, discarding unexpected seq %d", - control->txseq); - break; + err = l2cap_send_srejframe(chan, tx_seq); + if (err < 0) { + l2cap_send_disconn_req(chan->conn, chan, -err); + return err; } + } + } else { + expected_tx_seq_offset = __seq_offset(chan, + chan->expected_tx_seq, chan->buffer_seq); - /* There was a gap in the sequence, so an SREJ - * must be sent for each missing frame. The - * current frame is stored for later use. 
- */ - skb_queue_tail(&chan->srej_q, skb); - skb_in_use = 1; - BT_DBG("Queued %p (queue len %d)", skb, - skb_queue_len(&chan->srej_q)); + /* duplicated tx_seq */ + if (tx_seq_offset < expected_tx_seq_offset) + goto drop; - clear_bit(CONN_SREJ_ACT, &chan->conn_state); - l2cap_seq_list_clear(&chan->srej_list); - l2cap_send_srej(chan, control->txseq); + set_bit(CONN_SREJ_SENT, &chan->conn_state); - chan->rx_state = L2CAP_RX_STATE_SREJ_SENT; - break; - case L2CAP_TXSEQ_DUPLICATE: - l2cap_pass_to_tx(chan, control); - break; - case L2CAP_TXSEQ_INVALID_IGNORE: - break; - case L2CAP_TXSEQ_INVALID: - default: - l2cap_send_disconn_req(chan->conn, chan, - ECONNRESET); - break; - } - break; - case L2CAP_EV_RECV_RR: - l2cap_pass_to_tx(chan, control); - if (control->final) { - clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); + BT_DBG("chan %p, Enter SREJ", chan); - if (!test_and_clear_bit(CONN_REJ_ACT, - &chan->conn_state)) { - control->final = 0; - l2cap_retransmit_all(chan, control); - } + INIT_LIST_HEAD(&chan->srej_l); + chan->buffer_seq_srej = chan->buffer_seq; - l2cap_ertm_send(chan); - } else if (control->poll) { - l2cap_send_i_or_rr_or_rnr(chan); - } else { - if (test_and_clear_bit(CONN_REMOTE_BUSY, - &chan->conn_state) && - chan->unacked_frames) - __set_retrans_timer(chan); + __skb_queue_head_init(&chan->srej_q); + l2cap_add_to_srej_queue(chan, skb, tx_seq, sar); - l2cap_ertm_send(chan); - } - break; - case L2CAP_EV_RECV_RNR: - set_bit(CONN_REMOTE_BUSY, &chan->conn_state); - l2cap_pass_to_tx(chan, control); - if (control && control->poll) { - set_bit(CONN_SEND_FBIT, &chan->conn_state); - l2cap_send_rr_or_rnr(chan, 0); + /* Set P-bit only if there are some I-frames to ack. */ + if (__clear_ack_timer(chan)) + set_bit(CONN_SEND_PBIT, &chan->conn_state); + + err = l2cap_send_srejframe(chan, tx_seq); + if (err < 0) { + l2cap_send_disconn_req(chan->conn, chan, -err); + return err; } - __clear_retrans_timer(chan); - l2cap_seq_list_clear(&chan->retrans_list); - break; - case L2CAP_EV_RECV_REJ: - l2cap_handle_rej(chan, control); - break; - case L2CAP_EV_RECV_SREJ: - l2cap_handle_srej(chan, control); - break; - default: - break; } + return 0; - if (skb && !skb_in_use) { - BT_DBG("Freeing %p", skb); - kfree_skb(skb); +expected: + chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq); + + if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { + bt_cb(skb)->control.txseq = tx_seq; + bt_cb(skb)->control.sar = sar; + __skb_queue_tail(&chan->srej_q, skb); + return 0; } - return err; -} + err = l2cap_reassemble_sdu(chan, skb, rx_control); + chan->buffer_seq = __next_seq(chan, chan->buffer_seq); -static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan, - struct l2cap_ctrl *control, - struct sk_buff *skb, u8 event) -{ - int err = 0; - u16 txseq = control->txseq; - bool skb_in_use = 0; - - BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, - event); - - switch (event) { - case L2CAP_EV_RECV_IFRAME: - switch (l2cap_classify_txseq(chan, txseq)) { - case L2CAP_TXSEQ_EXPECTED: - /* Keep frame for reassembly later */ - l2cap_pass_to_tx(chan, control); - skb_queue_tail(&chan->srej_q, skb); - skb_in_use = 1; - BT_DBG("Queued %p (queue len %d)", skb, - skb_queue_len(&chan->srej_q)); - - chan->expected_tx_seq = __next_seq(chan, txseq); - break; - case L2CAP_TXSEQ_EXPECTED_SREJ: - l2cap_seq_list_pop(&chan->srej_list); + if (err < 0) { + l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); + return err; + } - l2cap_pass_to_tx(chan, control); - skb_queue_tail(&chan->srej_q, skb); - skb_in_use = 1; - 
BT_DBG("Queued %p (queue len %d)", skb, - skb_queue_len(&chan->srej_q)); + if (__is_ctrl_final(chan, rx_control)) { + if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) + l2cap_retransmit_frames(chan); + } - err = l2cap_rx_queued_iframes(chan); - if (err) - break; - break; - case L2CAP_TXSEQ_UNEXPECTED: - /* Got a frame that can't be reassembled yet. - * Save it for later, and send SREJs to cover - * the missing frames. - */ - skb_queue_tail(&chan->srej_q, skb); - skb_in_use = 1; - BT_DBG("Queued %p (queue len %d)", skb, - skb_queue_len(&chan->srej_q)); - - l2cap_pass_to_tx(chan, control); - l2cap_send_srej(chan, control->txseq); - break; - case L2CAP_TXSEQ_UNEXPECTED_SREJ: - /* This frame was requested with an SREJ, but - * some expected retransmitted frames are - * missing. Request retransmission of missing - * SREJ'd frames. - */ - skb_queue_tail(&chan->srej_q, skb); - skb_in_use = 1; - BT_DBG("Queued %p (queue len %d)", skb, - skb_queue_len(&chan->srej_q)); - - l2cap_pass_to_tx(chan, control); - l2cap_send_srej_list(chan, control->txseq); - break; - case L2CAP_TXSEQ_DUPLICATE_SREJ: - /* We've already queued this frame. Drop this copy. */ - l2cap_pass_to_tx(chan, control); - break; - case L2CAP_TXSEQ_DUPLICATE: - /* Expecting a later sequence number, so this frame - * was already received. Ignore it completely. - */ - break; - case L2CAP_TXSEQ_INVALID_IGNORE: - break; - case L2CAP_TXSEQ_INVALID: - default: - l2cap_send_disconn_req(chan->conn, chan, - ECONNRESET); - break; - } - break; - case L2CAP_EV_RECV_RR: - l2cap_pass_to_tx(chan, control); - if (control->final) { - clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); + chan->num_acked = (chan->num_acked + 1) % num_to_ack; + if (chan->num_acked == num_to_ack - 1) + l2cap_send_ack(chan); + else + __set_ack_timer(chan); - if (!test_and_clear_bit(CONN_REJ_ACT, - &chan->conn_state)) { - control->final = 0; - l2cap_retransmit_all(chan, control); - } + return 0; - l2cap_ertm_send(chan); - } else if (control->poll) { - if (test_and_clear_bit(CONN_REMOTE_BUSY, - &chan->conn_state) && - chan->unacked_frames) { - __set_retrans_timer(chan); - } +drop: + kfree_skb(skb); + return 0; +} - set_bit(CONN_SEND_FBIT, &chan->conn_state); - l2cap_send_srej_tail(chan); - } else { - if (test_and_clear_bit(CONN_REMOTE_BUSY, - &chan->conn_state) && - chan->unacked_frames) +static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control) +{ + BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, + __get_reqseq(chan, rx_control), rx_control); + + chan->expected_ack_seq = __get_reqseq(chan, rx_control); + l2cap_drop_acked_frames(chan); + + if (__is_ctrl_poll(chan, rx_control)) { + set_bit(CONN_SEND_FBIT, &chan->conn_state); + if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) { + if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && + (chan->unacked_frames > 0)) __set_retrans_timer(chan); - l2cap_send_ack(chan); - } - break; - case L2CAP_EV_RECV_RNR: - set_bit(CONN_REMOTE_BUSY, &chan->conn_state); - l2cap_pass_to_tx(chan, control); - if (control->poll) { - l2cap_send_srej_tail(chan); + clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); + l2cap_send_srejtail(chan); } else { - struct l2cap_ctrl rr_control; - memset(&rr_control, 0, sizeof(rr_control)); - rr_control.sframe = 1; - rr_control.super = L2CAP_SUPER_RR; - rr_control.reqseq = chan->buffer_seq; - l2cap_send_sframe(chan, &rr_control); + l2cap_send_i_or_rr_or_rnr(chan); } - break; - case L2CAP_EV_RECV_REJ: - l2cap_handle_rej(chan, control); - break; - case L2CAP_EV_RECV_SREJ: - 
l2cap_handle_srej(chan, control); - break; - } + } else if (__is_ctrl_final(chan, rx_control)) { + clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); - if (skb && !skb_in_use) { - BT_DBG("Freeing %p", skb); - kfree_skb(skb); - } + if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) + l2cap_retransmit_frames(chan); - return err; + } else { + if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) && + (chan->unacked_frames > 0)) + __set_retrans_timer(chan); + + clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); + if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) + l2cap_send_ack(chan); + else + l2cap_ertm_send(chan); + } } -static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq) +static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control) { - /* Make sure reqseq is for a packet that has been sent but not acked */ - u16 unacked; + u16 tx_seq = __get_reqseq(chan, rx_control); - unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq); - return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked; -} + BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); + + clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); + + chan->expected_ack_seq = tx_seq; + l2cap_drop_acked_frames(chan); + + if (__is_ctrl_final(chan, rx_control)) { + if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) + l2cap_retransmit_frames(chan); + } else { + l2cap_retransmit_frames(chan); -static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, - struct sk_buff *skb, u8 event) + if (test_bit(CONN_WAIT_F, &chan->conn_state)) + set_bit(CONN_REJ_ACT, &chan->conn_state); + } +} +static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control) { - int err = 0; + u16 tx_seq = __get_reqseq(chan, rx_control); - BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan, - control, skb, event, chan->rx_state); + BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); - if (__valid_reqseq(chan, control->reqseq)) { - switch (chan->rx_state) { - case L2CAP_RX_STATE_RECV: - err = l2cap_rx_state_recv(chan, control, skb, event); - break; - case L2CAP_RX_STATE_SREJ_SENT: - err = l2cap_rx_state_srej_sent(chan, control, skb, - event); - break; - default: - /* shut it down */ - break; + clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); + + if (__is_ctrl_poll(chan, rx_control)) { + chan->expected_ack_seq = tx_seq; + l2cap_drop_acked_frames(chan); + + set_bit(CONN_SEND_FBIT, &chan->conn_state); + l2cap_retransmit_one_frame(chan, tx_seq); + + l2cap_ertm_send(chan); + + if (test_bit(CONN_WAIT_F, &chan->conn_state)) { + chan->srej_save_reqseq = tx_seq; + set_bit(CONN_SREJ_ACT, &chan->conn_state); } + } else if (__is_ctrl_final(chan, rx_control)) { + if (test_bit(CONN_SREJ_ACT, &chan->conn_state) && + chan->srej_save_reqseq == tx_seq) + clear_bit(CONN_SREJ_ACT, &chan->conn_state); + else + l2cap_retransmit_one_frame(chan, tx_seq); } else { - BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d", - control->reqseq, chan->next_tx_seq, - chan->expected_ack_seq); - l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); + l2cap_retransmit_one_frame(chan, tx_seq); + if (test_bit(CONN_WAIT_F, &chan->conn_state)) { + chan->srej_save_reqseq = tx_seq; + set_bit(CONN_SREJ_ACT, &chan->conn_state); + } } - - return err; } -static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, - struct sk_buff *skb) +static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control) { - int err = 0; + u16 
tx_seq = __get_reqseq(chan, rx_control); - BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb, - chan->rx_state); + BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control); - if (l2cap_classify_txseq(chan, control->txseq) == - L2CAP_TXSEQ_EXPECTED) { - l2cap_pass_to_tx(chan, control); + set_bit(CONN_REMOTE_BUSY, &chan->conn_state); + chan->expected_ack_seq = tx_seq; + l2cap_drop_acked_frames(chan); - BT_DBG("buffer_seq %d->%d", chan->buffer_seq, - __next_seq(chan, chan->buffer_seq)); + if (__is_ctrl_poll(chan, rx_control)) + set_bit(CONN_SEND_FBIT, &chan->conn_state); - chan->buffer_seq = __next_seq(chan, chan->buffer_seq); + if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) { + __clear_retrans_timer(chan); + if (__is_ctrl_poll(chan, rx_control)) + l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL); + return; + } - l2cap_reassemble_sdu(chan, skb, control); + if (__is_ctrl_poll(chan, rx_control)) { + l2cap_send_srejtail(chan); } else { - if (chan->sdu) { - kfree_skb(chan->sdu); - chan->sdu = NULL; - } - chan->sdu_last_frag = NULL; - chan->sdu_len = 0; + rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR); + l2cap_send_sframe(chan, rx_control); + } +} - if (skb) { - BT_DBG("Freeing %p", skb); - kfree_skb(skb); - } +static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb) +{ + BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len); + + if (__is_ctrl_final(chan, rx_control) && + test_bit(CONN_WAIT_F, &chan->conn_state)) { + __clear_monitor_timer(chan); + if (chan->unacked_frames > 0) + __set_retrans_timer(chan); + clear_bit(CONN_WAIT_F, &chan->conn_state); } - chan->last_acked_seq = control->txseq; - chan->expected_tx_seq = __next_seq(chan, control->txseq); + switch (__get_ctrl_super(chan, rx_control)) { + case L2CAP_SUPER_RR: + l2cap_data_channel_rrframe(chan, rx_control); + break; - return err; + case L2CAP_SUPER_REJ: + l2cap_data_channel_rejframe(chan, rx_control); + break; + + case L2CAP_SUPER_SREJ: + l2cap_data_channel_srejframe(chan, rx_control); + break; + + case L2CAP_SUPER_RNR: + l2cap_data_channel_rnrframe(chan, rx_control); + break; + } + + kfree_skb(skb); + return 0; } -static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) +static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) { - struct l2cap_ctrl *control = &bt_cb(skb)->control; - u16 len; - u8 event; + u32 control; + u16 req_seq; + int len, next_tx_seq_offset, req_seq_offset; __unpack_control(chan, skb); + control = __get_control(chan, skb->data); + skb_pull(skb, __ctrl_size(chan)); len = skb->len; /* * We can just drop the corrupted I-frame here. * Receiver will miss it and start proper recovery - * procedures and ask for retransmission. + * procedures and ask retransmission. 
*/ if (l2cap_check_fcs(chan, skb)) goto drop; - if (!control->sframe && control->sar == L2CAP_SAR_START) + if (__is_sar_start(chan, control) && !__is_sframe(chan, control)) len -= L2CAP_SDULEN_SIZE; if (chan->fcs == L2CAP_FCS_CRC16) @@ -5069,57 +4578,34 @@ static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) goto drop; } - if (!control->sframe) { - int err; - - BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d", - control->sar, control->reqseq, control->final, - control->txseq); + req_seq = __get_reqseq(chan, control); - /* Validate F-bit - F=0 always valid, F=1 only - * valid in TX WAIT_F - */ - if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F) - goto drop; + req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq); - if (chan->mode != L2CAP_MODE_STREAMING) { - event = L2CAP_EV_RECV_IFRAME; - err = l2cap_rx(chan, control, skb, event); - } else { - err = l2cap_stream_rx(chan, control, skb); - } + next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq, + chan->expected_ack_seq); - if (err) - l2cap_send_disconn_req(chan->conn, chan, - ECONNRESET); - } else { - const u8 rx_func_to_event[4] = { - L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ, - L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ - }; + /* check for invalid req-seq */ + if (req_seq_offset > next_tx_seq_offset) { + l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); + goto drop; + } - /* Only I-frames are expected in streaming mode */ - if (chan->mode == L2CAP_MODE_STREAMING) + if (!__is_sframe(chan, control)) { + if (len < 0) { + l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); goto drop; + } - BT_DBG("sframe reqseq %d, final %d, poll %d, super %d", - control->reqseq, control->final, control->poll, - control->super); - + l2cap_data_channel_iframe(chan, control, skb); + } else { if (len != 0) { BT_ERR("%d", len); l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); goto drop; } - /* Validate F and P bits */ - if (control->final && (control->poll || - chan->tx_state != L2CAP_TX_STATE_WAIT_F)) - goto drop; - - event = rx_func_to_event[control->super]; - if (l2cap_rx(chan, control, skb, event)) - l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); + l2cap_data_channel_sframe(chan, control, skb); } return 0; @@ -5129,27 +4615,19 @@ static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) return 0; } -static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid, - struct sk_buff *skb) +static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb) { struct l2cap_chan *chan; + u32 control; + u16 tx_seq; + int len; chan = l2cap_get_chan_by_scid(conn, cid); if (!chan) { - if (cid == L2CAP_CID_A2MP) { - chan = a2mp_channel_create(conn, skb); - if (!chan) { - kfree_skb(skb); - return; - } - - l2cap_chan_lock(chan); - } else { - BT_DBG("unknown cid 0x%4.4x", cid); - /* Drop packet and return */ - kfree_skb(skb); - return; - } + BT_DBG("unknown cid 0x%4.4x", cid); + /* Drop packet and return */ + kfree_skb(skb); + return 0; } BT_DBG("chan %p, len %d", chan, skb->len); @@ -5167,13 +4645,49 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid, if (chan->imtu < skb->len) goto drop; - if (!chan->ops->recv(chan, skb)) + if (!chan->ops->recv(chan->data, skb)) goto done; break; case L2CAP_MODE_ERTM: + l2cap_ertm_data_rcv(chan, skb); + + goto done; + case L2CAP_MODE_STREAMING: - l2cap_data_rcv(chan, skb); + control = __get_control(chan, skb->data); + skb_pull(skb, __ctrl_size(chan)); + len = skb->len; + + if (l2cap_check_fcs(chan, skb)) + goto drop; + + 
if (__is_sar_start(chan, control)) + len -= L2CAP_SDULEN_SIZE; + + if (chan->fcs == L2CAP_FCS_CRC16) + len -= L2CAP_FCS_SIZE; + + if (len > chan->mps || len < 0 || __is_sframe(chan, control)) + goto drop; + + tx_seq = __get_txseq(chan, control); + + if (chan->expected_tx_seq != tx_seq) { + /* Frame(s) missing - must discard partial SDU */ + kfree_skb(chan->sdu); + chan->sdu = NULL; + chan->sdu_last_frag = NULL; + chan->sdu_len = 0; + + /* TODO: Notify userland of missing data */ + } + + chan->expected_tx_seq = __next_seq(chan, tx_seq); + + if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE) + l2cap_send_disconn_req(chan->conn, chan, ECONNRESET); + goto done; default: @@ -5186,10 +4700,11 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid, done: l2cap_chan_unlock(chan); + + return 0; } -static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, - struct sk_buff *skb) +static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb) { struct l2cap_chan *chan; @@ -5205,15 +4720,17 @@ static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, if (chan->imtu < skb->len) goto drop; - if (!chan->ops->recv(chan, skb)) - return; + if (!chan->ops->recv(chan->data, skb)) + return 0; drop: kfree_skb(skb); + + return 0; } -static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid, - struct sk_buff *skb) +static inline int l2cap_att_channel(struct l2cap_conn *conn, u16 cid, + struct sk_buff *skb) { struct l2cap_chan *chan; @@ -5229,11 +4746,13 @@ static void l2cap_att_channel(struct l2cap_conn *conn, u16 cid, if (chan->imtu < skb->len) goto drop; - if (!chan->ops->recv(chan, skb)) - return; + if (!chan->ops->recv(chan->data, skb)) + return 0; drop: kfree_skb(skb); + + return 0; } static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) @@ -5261,7 +4780,7 @@ static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb) case L2CAP_CID_CONN_LESS: psm = get_unaligned((__le16 *) skb->data); - skb_pull(skb, L2CAP_PSMLEN_SIZE); + skb_pull(skb, 2); l2cap_conless_channel(conn, psm, skb); break; @@ -5455,17 +4974,6 @@ int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) rsp.status = cpu_to_le16(stat); l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); - - if (!test_bit(CONF_REQ_SENT, &chan->conf_state) && - res == L2CAP_CR_SUCCESS) { - char buf[128]; - set_bit(CONF_REQ_SENT, &chan->conf_state); - l2cap_send_cmd(conn, l2cap_get_ident(conn), - L2CAP_CONF_REQ, - l2cap_build_conf_req(chan, buf), - buf); - chan->num_conf_req++; - } } l2cap_chan_unlock(chan); diff --git a/trunk/net/bluetooth/l2cap_sock.c b/trunk/net/bluetooth/l2cap_sock.c index a4bb27e8427e..3bb1611b9d48 100644 --- a/trunk/net/bluetooth/l2cap_sock.c +++ b/trunk/net/bluetooth/l2cap_sock.c @@ -27,6 +27,7 @@ /* Bluetooth L2CAP sockets. 
*/ +#include #include #include @@ -88,8 +89,8 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) if (err < 0) goto done; - if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP || - __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM) + if (__le16_to_cpu(la.l2_psm) == 0x0001 || + __le16_to_cpu(la.l2_psm) == 0x0003) chan->sec_level = BT_SECURITY_SDP; bacpy(&bt_sk(sk)->src, &la.l2_bdaddr); @@ -445,22 +446,6 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, ch return err; } -static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu) -{ - switch (chan->scid) { - case L2CAP_CID_LE_DATA: - if (mtu < L2CAP_LE_MIN_MTU) - return false; - break; - - default: - if (mtu < L2CAP_DEFAULT_MIN_MTU) - return false; - } - - return true; -} - static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; @@ -499,11 +484,6 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __us break; } - if (!l2cap_valid_mtu(chan, opts.imtu)) { - err = -EINVAL; - break; - } - chan->mode = opts.mode; switch (chan->mode) { case L2CAP_MODE_BASIC: @@ -893,34 +873,9 @@ static int l2cap_sock_release(struct socket *sock) return err; } -static void l2cap_sock_cleanup_listen(struct sock *parent) +static struct l2cap_chan *l2cap_sock_new_connection_cb(void *data) { - struct sock *sk; - - BT_DBG("parent %p", parent); - - /* Close not yet accepted channels */ - while ((sk = bt_accept_dequeue(parent, NULL))) { - struct l2cap_chan *chan = l2cap_pi(sk)->chan; - - l2cap_chan_lock(chan); - __clear_chan_timer(chan); - l2cap_chan_close(chan, ECONNRESET); - l2cap_chan_unlock(chan); - - l2cap_sock_kill(sk); - } -} - -static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan) -{ - struct sock *sk, *parent = chan->data; - - /* Check for backlog size */ - if (sk_acceptq_is_full(parent)) { - BT_DBG("backlog full %d", parent->sk_ack_backlog); - return NULL; - } + struct sock *sk, *parent = data; sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC); @@ -934,10 +889,10 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan) return l2cap_pi(sk)->chan; } -static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) +static int l2cap_sock_recv_cb(void *data, struct sk_buff *skb) { int err; - struct sock *sk = chan->data; + struct sock *sk = data; struct l2cap_pinfo *pi = l2cap_pi(sk); lock_sock(sk); @@ -970,57 +925,16 @@ static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) return err; } -static void l2cap_sock_close_cb(struct l2cap_chan *chan) +static void l2cap_sock_close_cb(void *data) { - struct sock *sk = chan->data; + struct sock *sk = data; l2cap_sock_kill(sk); } -static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err) +static void l2cap_sock_state_change_cb(void *data, int state) { - struct sock *sk = chan->data; - struct sock *parent; - - lock_sock(sk); - - parent = bt_sk(sk)->parent; - - sock_set_flag(sk, SOCK_ZAPPED); - - switch (chan->state) { - case BT_OPEN: - case BT_BOUND: - case BT_CLOSED: - break; - case BT_LISTEN: - l2cap_sock_cleanup_listen(sk); - sk->sk_state = BT_CLOSED; - chan->state = BT_CLOSED; - - break; - default: - sk->sk_state = BT_CLOSED; - chan->state = BT_CLOSED; - - sk->sk_err = err; - - if (parent) { - bt_accept_unlink(sk); - parent->sk_data_ready(parent, 0); - } else { - sk->sk_state_change(sk); - } - - break; - } - - release_sock(sk); -} - -static 
void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state) -{ - struct sock *sk = chan->data; + struct sock *sk = data; sk->sk_state = state; } @@ -1041,34 +955,12 @@ static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan, return skb; } -static void l2cap_sock_ready_cb(struct l2cap_chan *chan) -{ - struct sock *sk = chan->data; - struct sock *parent; - - lock_sock(sk); - - parent = bt_sk(sk)->parent; - - BT_DBG("sk %p, parent %p", sk, parent); - - sk->sk_state = BT_CONNECTED; - sk->sk_state_change(sk); - - if (parent) - parent->sk_data_ready(parent, 0); - - release_sock(sk); -} - static struct l2cap_ops l2cap_chan_ops = { .name = "L2CAP Socket Interface", .new_connection = l2cap_sock_new_connection_cb, .recv = l2cap_sock_recv_cb, .close = l2cap_sock_close_cb, - .teardown = l2cap_sock_teardown_cb, .state_change = l2cap_sock_state_change_cb, - .ready = l2cap_sock_ready_cb, .alloc_skb = l2cap_sock_alloc_skb_cb, }; diff --git a/trunk/net/bluetooth/lib.c b/trunk/net/bluetooth/lib.c index e1c97527e16c..506628876f36 100644 --- a/trunk/net/bluetooth/lib.c +++ b/trunk/net/bluetooth/lib.c @@ -26,7 +26,12 @@ #define pr_fmt(fmt) "Bluetooth: " fmt -#include +#include + +#include +#include +#include +#include #include diff --git a/trunk/net/bluetooth/mgmt.c b/trunk/net/bluetooth/mgmt.c index c72307cc25fc..25d220776079 100644 --- a/trunk/net/bluetooth/mgmt.c +++ b/trunk/net/bluetooth/mgmt.c @@ -24,6 +24,8 @@ /* Bluetooth HCI Management interface */ +#include +#include #include #include @@ -712,8 +714,7 @@ static struct pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, } static void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, - void (*cb)(struct pending_cmd *cmd, - void *data), + void (*cb)(struct pending_cmd *cmd, void *data), void *data) { struct list_head *p, *n; @@ -870,7 +871,7 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, } if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || - mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { + mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { err = cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, MGMT_STATUS_BUSY); goto failed; @@ -977,7 +978,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, } if (mgmt_pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || - mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { + mgmt_pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { err = cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, MGMT_STATUS_BUSY); goto failed; @@ -1000,7 +1001,7 @@ static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, scan = 0; if (test_bit(HCI_ISCAN, &hdev->flags) && - hdev->discov_timeout > 0) + hdev->discov_timeout > 0) cancel_delayed_work(&hdev->discov_off); } @@ -1055,7 +1056,7 @@ static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data, bool changed = false; if (!!cp->val != test_bit(HCI_LINK_SECURITY, - &hdev->dev_flags)) { + &hdev->dev_flags)) { change_bit(HCI_LINK_SECURITY, &hdev->dev_flags); changed = true; } @@ -1316,7 +1317,7 @@ static bool enable_service_cache(struct hci_dev *hdev) } static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, - u16 len) + u16 len) { struct mgmt_cp_remove_uuid *cp = data; struct pending_cmd *cmd; @@ -1441,7 +1442,7 @@ static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, } static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, - u16 len) + u16 len) { struct mgmt_cp_load_link_keys *cp = data; u16 
key_count, expected_len; @@ -1453,13 +1454,13 @@ static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, sizeof(struct mgmt_link_key_info); if (expected_len != len) { BT_ERR("load_link_keys: expected %u bytes, got %u bytes", - len, expected_len); + len, expected_len); return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, MGMT_STATUS_INVALID_PARAMS); } BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys, - key_count); + key_count); hci_dev_lock(hdev); @@ -1534,10 +1535,10 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, if (cp->disconnect) { if (cp->addr.type == BDADDR_BREDR) conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, - &cp->addr.bdaddr); + &cp->addr.bdaddr); else conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, - &cp->addr.bdaddr); + &cp->addr.bdaddr); } else { conn = NULL; } @@ -1593,8 +1594,7 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, } if (cp->addr.type == BDADDR_BREDR) - conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, - &cp->addr.bdaddr); + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); else conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); @@ -1813,7 +1813,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data, hdev->io_capability = cp->io_capability; BT_DBG("%s IO capability set to 0x%02x", hdev->name, - hdev->io_capability); + hdev->io_capability); hci_dev_unlock(hdev); @@ -1821,7 +1821,7 @@ static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data, 0); } -static struct pending_cmd *find_pairing(struct hci_conn *conn) +static inline struct pending_cmd *find_pairing(struct hci_conn *conn) { struct hci_dev *hdev = conn->hdev; struct pending_cmd *cmd; @@ -1873,22 +1873,6 @@ static void pairing_complete_cb(struct hci_conn *conn, u8 status) pairing_complete(cmd, mgmt_status(status)); } -static void le_connect_complete_cb(struct hci_conn *conn, u8 status) -{ - struct pending_cmd *cmd; - - BT_DBG("status %u", status); - - if (!status) - return; - - cmd = find_pairing(conn); - if (!cmd) - BT_DBG("Unable to find a pending command"); - else - pairing_complete(cmd, mgmt_status(status)); -} - static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -1927,15 +1911,8 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, rp.addr.type = cp->addr.type; if (IS_ERR(conn)) { - int status; - - if (PTR_ERR(conn) == -EBUSY) - status = MGMT_STATUS_BUSY; - else - status = MGMT_STATUS_CONNECT_FAILED; - err = cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, - status, &rp, + MGMT_STATUS_CONNECT_FAILED, &rp, sizeof(rp)); goto unlock; } @@ -1957,8 +1934,6 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, /* For LE, just connecting isn't a proof that the pairing finished */ if (cp->addr.type == BDADDR_BREDR) conn->connect_cfm_cb = pairing_complete_cb; - else - conn->connect_cfm_cb = le_connect_complete_cb; conn->security_cfm_cb = pairing_complete_cb; conn->disconn_cfm_cb = pairing_complete_cb; @@ -1966,7 +1941,7 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, cmd->user_data = conn; if (conn->state == BT_CONNECTED && - hci_conn_security(conn, sec_level, auth_type)) + hci_conn_security(conn, sec_level, auth_type)) pairing_complete(cmd, 0); err = 0; @@ -2263,7 +2238,7 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev, } static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev, - void *data, 
@@ -2263,7 +2238,7 @@ static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
 }

 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
-				  void *data, u16 len)
+					void *data, u16 len)
 {
 	struct mgmt_cp_remove_remote_oob_data *cp = data;
 	u8 status;
@@ -2432,7 +2407,7 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,

 	case DISCOVERY_RESOLVING:
 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
-						     NAME_PENDING);
+							NAME_PENDING);
 		if (!e) {
 			mgmt_pending_remove(cmd);
 			err = cmd_complete(sk, hdev->id,
@@ -2654,7 +2629,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
 		sizeof(struct mgmt_ltk_info);
 	if (expected_len != len) {
 		BT_ERR("load_keys: expected %u bytes, got %u bytes",
-		       len, expected_len);
+			len, expected_len);
 		return cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
 				  EINVAL);
 	}
@@ -2779,7 +2754,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
 	}

 	if (opcode >= ARRAY_SIZE(mgmt_handlers) ||
-	    mgmt_handlers[opcode].func == NULL) {
+			mgmt_handlers[opcode].func == NULL) {
 		BT_DBG("Unknown op %u", opcode);
 		err = cmd_status(sk, index, opcode,
 				 MGMT_STATUS_UNKNOWN_COMMAND);
@@ -2787,7 +2762,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
 	}

 	if ((hdev && opcode < MGMT_OP_READ_INFO) ||
-	    (!hdev && opcode >= MGMT_OP_READ_INFO)) {
+			(!hdev && opcode >= MGMT_OP_READ_INFO)) {
 		err = cmd_status(sk, index, opcode,
 				 MGMT_STATUS_INVALID_INDEX);
 		goto done;
@@ -2796,7 +2771,7 @@ int mgmt_control(struct sock *sk, struct msghdr *msg, size_t msglen)
 	handler = &mgmt_handlers[opcode];

 	if ((handler->var_len && len < handler->data_len) ||
-	    (!handler->var_len && len != handler->data_len)) {
+			(!handler->var_len && len != handler->data_len)) {
 		err = cmd_status(sk, index, opcode,
 				 MGMT_STATUS_INVALID_PARAMS);
 		goto done;
@@ -2980,7 +2955,7 @@ int mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
 	ev.key.addr.type = BDADDR_BREDR;
 	ev.key.type = key->type;
-	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
+	memcpy(ev.key.val, key->val, 16);
 	ev.key.pin_len = key->pin_len;

 	return mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
@@ -3115,7 +3090,7 @@ int mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
 		mgmt_pending_remove(cmd);

 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
-			     hdev);
+				hdev);
 	return err;
 }
@@ -3205,7 +3180,7 @@ int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
 }

 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
-			      u8 link_type, u8 addr_type)
+				u8 link_type, u8 addr_type)
 {
 	struct mgmt_ev_user_passkey_request ev;
@@ -3219,8 +3194,8 @@ int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
 }

 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
-				      u8 link_type, u8 addr_type, u8 status,
-				      u8 opcode)
+					u8 link_type, u8 addr_type, u8 status,
+					u8 opcode)
 {
 	struct pending_cmd *cmd;
 	struct mgmt_rp_user_confirm_reply rp;
@@ -3251,8 +3226,7 @@ int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
 					 u8 link_type, u8 addr_type, u8 status)
 {
 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
-					  status,
-					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
+					  status, MGMT_OP_USER_CONFIRM_NEG_REPLY);
 }

 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
@@ -3266,8 +3240,7 @@ int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
 					 u8 link_type, u8 addr_type, u8 status)
 {
 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
-					  status,
-					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
+					  status, MGMT_OP_USER_PASSKEY_NEG_REPLY);
 }

 int mgmt_auth_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
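Both load_link_keys() and load_long_term_keys() above reject a command whose length does not match the header plus count-times-entry size. A minimal userspace sketch of that check follows; the struct layout and names are illustrative, not the mgmt wire format.

/* Hedged sketch of the expected_len check used by load_link_keys() and
 * load_long_term_keys() above: a variable-length command carries a count
 * followed by fixed-size entries, and the total length must match exactly. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct key_info { uint8_t addr[6]; uint8_t val[16]; };
struct load_keys_cmd { uint16_t key_count; /* followed by key_count entries */ };

static int check_load_keys(const uint8_t *buf, size_t len)
{
	struct load_keys_cmd cp;
	size_t expected_len;

	if (len < sizeof(cp))
		return -1;
	memcpy(&cp, buf, sizeof(cp));	/* sketch assumes host byte order */

	expected_len = sizeof(cp) + (size_t)cp.key_count * sizeof(struct key_info);
	if (expected_len != len) {
		fprintf(stderr, "expected %zu bytes, got %zu bytes\n",
			expected_len, len);
		return -1;	/* reject: invalid parameters */
	}
	return 0;
}

int main(void)
{
	/* count = 2 followed by two 22-byte entries, 46 bytes in total */
	uint8_t buf[sizeof(struct load_keys_cmd) + 2 * sizeof(struct key_info)] = { 2, 0 };

	return check_load_keys(buf, sizeof(buf)) ? 1 : 0;
}

An exact-match check (rather than a minimum-length check) prevents trailing garbage from being silently accepted as extra keys.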
diff --git a/trunk/net/bluetooth/rfcomm/core.c b/trunk/net/bluetooth/rfcomm/core.c
index c75107ef8920..8a602388f1e7 100644
--- a/trunk/net/bluetooth/rfcomm/core.c
+++ b/trunk/net/bluetooth/rfcomm/core.c
@@ -26,8 +26,22 @@
  */

 #include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
 #include
+#include
+#include
+#include
 #include
+#include
+
+#include
+#include

 #include
 #include
@@ -101,14 +115,14 @@ static void rfcomm_session_del(struct rfcomm_session *s);
 #define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1)
 #define __get_rpn_parity(line) (((line) >> 3) & 0x7)

-static void rfcomm_schedule(void)
+static inline void rfcomm_schedule(void)
 {
 	if (!rfcomm_thread)
 		return;
 	wake_up_process(rfcomm_thread);
 }

-static void rfcomm_session_put(struct rfcomm_session *s)
+static inline void rfcomm_session_put(struct rfcomm_session *s)
 {
 	if (atomic_dec_and_test(&s->refcnt))
 		rfcomm_session_del(s);
@@ -213,7 +227,7 @@ static int rfcomm_l2sock_create(struct socket **sock)
 	return err;
 }

-static int rfcomm_check_security(struct rfcomm_dlc *d)
+static inline int rfcomm_check_security(struct rfcomm_dlc *d)
 {
 	struct sock *sk = d->session->sock->sk;
 	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
@@ -1736,7 +1750,7 @@ static void rfcomm_process_connect(struct rfcomm_session *s)
 /* Send data queued for the DLC.
  * Return number of frames left in the queue.
  */
-static int rfcomm_process_tx(struct rfcomm_dlc *d)
+static inline int rfcomm_process_tx(struct rfcomm_dlc *d)
 {
 	struct sk_buff *skb;
 	int err;
@@ -1784,7 +1798,7 @@ static int rfcomm_process_tx(struct rfcomm_dlc *d)
 	return skb_queue_len(&d->tx_queue);
 }

-static void rfcomm_process_dlcs(struct rfcomm_session *s)
+static inline void rfcomm_process_dlcs(struct rfcomm_session *s)
 {
 	struct rfcomm_dlc *d;
 	struct list_head *p, *n;
@@ -1844,7 +1858,7 @@ static void rfcomm_process_dlcs(struct rfcomm_session *s)
 	}
 }

-static void rfcomm_process_rx(struct rfcomm_session *s)
+static inline void rfcomm_process_rx(struct rfcomm_session *s)
 {
 	struct socket *sock = s->sock;
 	struct sock *sk = sock->sk;
@@ -1869,7 +1883,7 @@ static void rfcomm_process_rx(struct rfcomm_session *s)
 	}
 }

-static void rfcomm_accept_connection(struct rfcomm_session *s)
+static inline void rfcomm_accept_connection(struct rfcomm_session *s)
 {
 	struct socket *sock = s->sock, *nsock;
 	int err;
@@ -1903,7 +1917,7 @@ static void rfcomm_accept_connection(struct rfcomm_session *s)
 	sock_release(nsock);
 }

-static void rfcomm_check_connection(struct rfcomm_session *s)
+static inline void rfcomm_check_connection(struct rfcomm_session *s)
 {
 	struct sock *sk = s->sock->sk;

@@ -1927,7 +1941,7 @@ static void rfcomm_check_connection(struct rfcomm_session *s)
 	}
 }

-static void rfcomm_process_sessions(void)
+static inline void rfcomm_process_sessions(void)
 {
 	struct list_head *p, *n;
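rfcomm_session_put() above releases a session reference and frees the session when the last holder drops it, via atomic_dec_and_test(). A minimal userspace sketch of the same put-and-free pattern, using C11 <stdatomic.h> instead of the kernel's atomic_t, is shown below; the structure and function names are illustrative.

/* Hedged sketch of the put-and-free pattern behind rfcomm_session_put():
 * the last reference holder, and only that holder, tears the object down. */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct session {
	atomic_int refcnt;
	/* ... protocol state would live here ... */
};

static struct session *session_new(void)
{
	struct session *s = malloc(sizeof(*s));
	if (s)
		atomic_init(&s->refcnt, 1);	/* creator holds the first reference */
	return s;
}

static void session_hold(struct session *s)
{
	atomic_fetch_add(&s->refcnt, 1);
}

static void session_put(struct session *s)
{
	/* fetch_sub returns the previous value: 1 means we dropped the last ref */
	if (atomic_fetch_sub(&s->refcnt, 1) == 1) {
		printf("last reference dropped, freeing session\n");
		free(s);
	}
}

int main(void)
{
	struct session *s = session_new();
	if (!s)
		return 1;
	session_hold(s);	/* a second user takes a reference */
	session_put(s);		/* first user done */
	session_put(s);		/* last user done: object is freed here */
	return 0;
}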
diff --git a/trunk/net/bluetooth/rfcomm/sock.c b/trunk/net/bluetooth/rfcomm/sock.c
index 7e1e59645c05..e8707debb864 100644
--- a/trunk/net/bluetooth/rfcomm/sock.c
+++ b/trunk/net/bluetooth/rfcomm/sock.c
@@ -25,8 +25,27 @@
  * RFCOMM sockets.
  */

-#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
 #include
+#include
+#include
+#include
+
+#include

 #include
 #include
diff --git a/trunk/net/bluetooth/rfcomm/tty.c b/trunk/net/bluetooth/rfcomm/tty.c
index cb960773c002..d1820ff14aee 100644
--- a/trunk/net/bluetooth/rfcomm/tty.c
+++ b/trunk/net/bluetooth/rfcomm/tty.c
@@ -31,6 +31,11 @@
 #include
 #include

+#include
+#include
+#include
+#include
+
 #include
 #include
 #include
@@ -127,7 +132,7 @@ static struct rfcomm_dev *__rfcomm_dev_get(int id)
 	return NULL;
 }

-static struct rfcomm_dev *rfcomm_dev_get(int id)
+static inline struct rfcomm_dev *rfcomm_dev_get(int id)
 {
 	struct rfcomm_dev *dev;
@@ -340,7 +345,7 @@ static void rfcomm_wfree(struct sk_buff *skb)
 	tty_port_put(&dev->port);
 }

-static void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
+static inline void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev)
 {
 	tty_port_get(&dev->port);
 	atomic_add(skb->truesize, &dev->wmem_alloc);
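The rfcomm_dev_get() hunk above follows a common split: a bare lookup helper (__rfcomm_dev_get) that requires the caller to hold the list lock, and a public wrapper that takes the lock and grabs a reference before returning. A minimal userspace sketch of that lookup-and-hold pattern follows; pthreads and a plain counter stand in for the kernel's locking and tty_port refcounting, and the names are illustrative.

/* Hedged sketch of the lookup-and-hold pattern around rfcomm_dev_get():
 * find the object under the lock and pin it before the lock is dropped. */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct dev {
	int id;
	int refcnt;		/* protected by dev_lock in this sketch */
	struct dev *next;
};

static struct dev *dev_list;
static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

static struct dev *__dev_get(int id)	/* caller must hold dev_lock */
{
	for (struct dev *d = dev_list; d; d = d->next)
		if (d->id == id)
			return d;
	return NULL;
}

static struct dev *dev_get(int id)	/* lookup + take a reference */
{
	pthread_mutex_lock(&dev_lock);
	struct dev *d = __dev_get(id);
	if (d)
		d->refcnt++;
	pthread_mutex_unlock(&dev_lock);
	return d;	/* caller now owns a reference and must put it later */
}

int main(void)
{
	struct dev d0 = { .id = 7, .refcnt = 1, .next = NULL };

	dev_list = &d0;
	struct dev *d = dev_get(7);
	printf("found dev %d, refcnt now %d\n", d ? d->id : -1, d ? d->refcnt : 0);
	return 0;
}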
diff --git a/trunk/net/bluetooth/sco.c b/trunk/net/bluetooth/sco.c
index 40bbe25dcff7..cbdd313659a7 100644
--- a/trunk/net/bluetooth/sco.c
+++ b/trunk/net/bluetooth/sco.c
@@ -25,8 +25,26 @@
 /* Bluetooth SCO sockets. */

 #include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
 #include
 #include
+#include
+#include
+#include
+
+#include

 #include
 #include
@@ -105,7 +123,7 @@ static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
 	return conn;
 }

-static struct sock *sco_chan_get(struct sco_conn *conn)
+static inline struct sock *sco_chan_get(struct sco_conn *conn)
 {
 	struct sock *sk = NULL;
 	sco_conn_lock(conn);
@@ -139,8 +157,7 @@ static int sco_conn_del(struct hci_conn *hcon, int err)
 	return 0;
 }

-static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
-			struct sock *parent)
+static inline int sco_chan_add(struct sco_conn *conn, struct sock *sk, struct sock *parent)
 {
 	int err = 0;
@@ -211,7 +228,7 @@ static int sco_connect(struct sock *sk)
 	return err;
 }

-static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
+static inline int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
 {
 	struct sco_conn *conn = sco_pi(sk)->conn;
 	struct sk_buff *skb;
@@ -237,7 +254,7 @@ static int sco_send_frame(struct sock *sk, struct msghdr *msg, int len)
 	return len;
 }

-static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
+static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
 {
 	struct sock *sk = sco_chan_get(conn);
@@ -506,7 +523,7 @@ static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen
 		goto done;

 	err = bt_sock_wait_state(sk, BT_CONNECTED,
-				 sock_sndtimeo(sk, flags & O_NONBLOCK));
+			sock_sndtimeo(sk, flags & O_NONBLOCK));

 done:
 	release_sock(sk);
@@ -771,7 +788,7 @@ static int sco_sock_shutdown(struct socket *sock, int how)

 		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
 			err = bt_sock_wait_state(sk, BT_CLOSED,
-						 sk->sk_lingertime);
+							sk->sk_lingertime);
 	}
 	release_sock(sk);
 	return err;
@@ -861,7 +878,7 @@ static void sco_conn_ready(struct sco_conn *conn)
 		bh_lock_sock(parent);

 		sk = sco_sock_alloc(sock_net(parent), NULL,
-				    BTPROTO_SCO, GFP_ATOMIC);
+				BTPROTO_SCO, GFP_ATOMIC);
 		if (!sk) {
 			bh_unlock_sock(parent);
 			goto done;
@@ -890,7 +907,7 @@ static void sco_conn_ready(struct sco_conn *conn)
 /* ----- SCO interface with lower layer (HCI) ----- */
 int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
 {
-	struct sock *sk;
+	register struct sock *sk;
 	struct hlist_node *node;
 	int lm = 0;
@@ -903,7 +920,7 @@ int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
 			continue;

 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
-		    !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
+				!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
 			lm |= HCI_LM_ACCEPT;
 			break;
 		}
@@ -964,7 +981,7 @@ static int sco_debugfs_show(struct seq_file *f, void *p)
 	sk_for_each(sk, node, &sco_sk_list.head) {
 		seq_printf(f, "%s %s %d\n", batostr(&bt_sk(sk)->src),
-			   batostr(&bt_sk(sk)->dst), sk->sk_state);
+				batostr(&bt_sk(sk)->dst), sk->sk_state);
 	}

 	read_unlock(&sco_sk_list.lock);
@@ -1027,8 +1044,8 @@ int __init sco_init(void)
 	}

 	if (bt_debugfs) {
-		sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs,
-						  NULL, &sco_debugfs_fops);
+		sco_debugfs = debugfs_create_file("sco", 0444,
+				bt_debugfs, NULL, &sco_debugfs_fops);
 		if (!sco_debugfs)
 			BT_ERR("Failed to create SCO debug file");
 	}
diff --git a/trunk/net/bluetooth/smp.c b/trunk/net/bluetooth/smp.c
index 16ef0dc85a0a..6fc7c4708f3e 100644
--- a/trunk/net/bluetooth/smp.c
+++ b/trunk/net/bluetooth/smp.c
@@ -20,15 +20,14 @@
    SOFTWARE IS DISCLAIMED.
 */

-#include
-#include
-#include
-
 #include
 #include
 #include
 #include
 #include
+#include
+#include
+#include

 #define SMP_TIMEOUT msecs_to_jiffies(30000)
@@ -649,7 +648,7 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)

 	auth |= (req->auth_req | rsp->auth_req) & SMP_AUTH_MITM;

-	ret = tk_request(conn, 0, auth, req->io_capability, rsp->io_capability);
+	ret = tk_request(conn, 0, auth, rsp->io_capability, req->io_capability);
 	if (ret)
 		return SMP_UNSPECIFIED;
@@ -704,7 +703,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
 	return 0;
 }

-static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
+static u8 smp_ltk_encrypt(struct l2cap_conn *conn)
 {
 	struct smp_ltk *key;
 	struct hci_conn *hcon = conn->hcon;
@@ -713,9 +712,6 @@ static u8 smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level)
 	if (!key)
 		return 0;

-	if (sec_level > BT_SECURITY_MEDIUM && !key->authenticated)
-		return 0;
-
 	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags))
 		return 1;
@@ -736,7 +732,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)

 	hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req);

-	if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
+	if (smp_ltk_encrypt(conn))
 		return 0;

 	if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
@@ -775,7 +771,7 @@ int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level)
 		return 1;

 	if (hcon->link_mode & HCI_LM_MASTER)
-		if (smp_ltk_encrypt(conn, sec_level))
+		if (smp_ltk_encrypt(conn))
 			goto done;

 	if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
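The removed lines in the smp_ltk_encrypt() hunk above show a check that a stored long-term key only satisfies a high security request if it was created by authenticated (MITM-protected) pairing. Below is a minimal userspace sketch of that decision; the security levels and key structure are simplified stand-ins for the kernel's definitions.

/* Hedged sketch of the sec_level check visible in smp_ltk_encrypt() above:
 * an unauthenticated key must not be promoted to satisfy a high security
 * requirement. */
#include <stdbool.h>
#include <stdio.h>

enum sec_level { SEC_LOW, SEC_MEDIUM, SEC_HIGH };

struct ltk {
	bool authenticated;	/* key was created with MITM protection */
	unsigned char val[16];
};

/* Return true if the stored key is good enough for the requested level. */
static bool ltk_usable(const struct ltk *key, enum sec_level sec_level)
{
	if (!key)
		return false;
	if (sec_level > SEC_MEDIUM && !key->authenticated)
		return false;	/* unauthenticated key cannot give SEC_HIGH */
	return true;
}

int main(void)
{
	struct ltk key = { .authenticated = false };

	printf("medium: %d\n", ltk_usable(&key, SEC_MEDIUM));	/* prints 1 */
	printf("high:   %d\n", ltk_usable(&key, SEC_HIGH));	/* prints 0 */
	return 0;
}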
diff --git a/trunk/net/mac80211/cfg.c b/trunk/net/mac80211/cfg.c
index 85ac364f4636..498c94e34427 100644
--- a/trunk/net/mac80211/cfg.c
+++ b/trunk/net/mac80211/cfg.c
@@ -2097,9 +2097,6 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
 	struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
 	int i, ret;

-	if (!ieee80211_sdata_running(sdata))
-		return -ENETDOWN;
-
 	if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
 		ret = drv_set_bitrate_mask(local, sdata, mask);
 		if (ret)
diff --git a/trunk/net/mac80211/mlme.c b/trunk/net/mac80211/mlme.c
index 079038d26a14..d7134c170336 100644
--- a/trunk/net/mac80211/mlme.c
+++ b/trunk/net/mac80211/mlme.c
@@ -1337,8 +1337,6 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 	if (WARN_ON(!ifmgd->associated))
 		return;

-	ieee80211_stop_poll(sdata);
-
 	memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);

 	ifmgd->associated = NULL;
@@ -2594,6 +2592,8 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	u8 frame_buf[DEAUTH_DISASSOC_LEN];

+	ieee80211_stop_poll(sdata);
+
 	ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
 			       false, frame_buf);
 	mutex_unlock(&ifmgd->mtx);
diff --git a/trunk/net/mac80211/sta_info.h b/trunk/net/mac80211/sta_info.h
index a470e1123a55..3bb24a121c95 100644
--- a/trunk/net/mac80211/sta_info.h
+++ b/trunk/net/mac80211/sta_info.h
@@ -271,9 +271,6 @@ struct sta_ampdu_mlme {
  * @plink_timer: peer link watch timer
  * @plink_timer_was_running: used by suspend/resume to restore timers
  * @t_offset: timing offset relative to this host
- * @t_offset_setpoint: reference timing offset of this sta to be used when
- *	calculating clockdrift
- * @ch_type: peer's channel type
  * @debugfs: debug filesystem info
  * @dead: set to true when sta is unlinked
  * @uploaded: set to true when sta is uploaded to the driver
@@ -281,8 +278,6 @@ struct sta_ampdu_mlme {
  * @sta: station information we share with the driver
 * @sta_state: duplicates information about station state (for debug)
 * @beacon_loss_count: number of times beacon loss has triggered
- * @supports_40mhz: tracks whether the station advertised 40 MHz support
- *	as we overwrite its HT parameters with the currently used value
 */
 struct sta_info {
	/* General information, mostly static */
diff --git a/trunk/net/wireless/reg.c b/trunk/net/wireless/reg.c
index baf5704740ee..15f347477a99 100644
--- a/trunk/net/wireless/reg.c
+++ b/trunk/net/wireless/reg.c
@@ -1389,7 +1389,7 @@ static void reg_set_request_processed(void)
 	spin_unlock(&reg_requests_lock);

 	if (last_request->initiator == NL80211_REGDOM_SET_BY_USER)
-		cancel_delayed_work(&reg_timeout);
+		cancel_delayed_work_sync(&reg_timeout);

 	if (need_more_processing)
 		schedule_work(&reg_work);
diff --git a/trunk/net/wireless/util.c b/trunk/net/wireless/util.c
index 316cfd00914f..8f2d68fc3a44 100644
--- a/trunk/net/wireless/util.c
+++ b/trunk/net/wireless/util.c
@@ -804,7 +804,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
 		    ntype == NL80211_IFTYPE_P2P_CLIENT))
 			return -EBUSY;

-	if (ntype != otype && netif_running(dev)) {
+	if (ntype != otype) {
 		err = cfg80211_can_change_interface(rdev, dev->ieee80211_ptr,
 						    ntype);
 		if (err)
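The reg.c hunk above swaps between cancel_delayed_work() and cancel_delayed_work_sync(): only the latter also waits for an already-running instance of the work to finish. Below is a hedged userspace analogy of that distinction using pthreads; it is an illustration of the ask-versus-ask-and-wait semantics, not the workqueue implementation, and all names here are invented for the sketch.

/* Hedged sketch: requesting cancellation versus requesting it and waiting
 * for the handler to finish, as an analogy for the _sync variant above. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_bool cancelled;

static void *work_fn(void *arg)
{
	(void)arg;
	if (!atomic_load(&cancelled)) {
		usleep(1000);	/* the work is busy for a while */
		printf("work ran to completion\n");
	}
	return NULL;
}

static void cancel_work_async(void)	/* analogy: just ask, return immediately */
{
	atomic_store(&cancelled, true);
}

static void cancel_work_and_wait(pthread_t t)	/* analogy: ask and wait */
{
	atomic_store(&cancelled, true);
	pthread_join(t, NULL);	/* nothing of the work runs after this returns */
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, work_fn, NULL);
	cancel_work_and_wait(t);	/* safe to release the work's resources now */
	(void)cancel_work_async;	/* the async variant gives no such guarantee */
	return 0;
}

The practical difference is that after the waiting variant returns, the caller knows the handler can no longer touch shared state, which matters when that state is about to be freed or when locks held by the caller interact with locks the handler takes.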