mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-01-12 09:32:12 +00:00
Including fixes from wireless, Bluetooth and netfilter.

Current release - regressions:

 - tcp: fix too slow tcp_rcvbuf_grow() action

 - bluetooth: fix corruption in h4_recv_buf() after cleanup

Previous releases - regressions:

 - mptcp: restore window probe

 - bluetooth:
   - fix connection cleanup with BIG with 2 or more BIS
   - fix crash in set_mesh_sync and set_mesh_complete

 - batman-adv: release references to inactive interfaces

 - nic:
   - ice: fix usage of logical PF id
   - sfc: fix potential memory leak in efx_mae_process_mport()

Previous releases - always broken:

 - devmem: refresh devmem TX dst in case of route invalidation

 - netfilter: add seqadj extension for natted connections

 - wifi:
   - iwlwifi: fix potential use after free in iwl_mld_remove_link()
   - brcmfmac: fix crash while sending action frames in standalone AP Mode

 - eth:
   - mlx5e: cancel tls RX async resync request in error flows
   - ixgbe: fix memory leak and use-after-free in ixgbe_recovery_probe()
   - hibmcge: fix rx buf avl irq is not re-enabled in irq_handle issue
   - cxgb4: fix potential use-after-free in ipsec callback
   - nfp: fix memory leak in nfp_net_alloc()

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
-----BEGIN PGP SIGNATURE-----
iQJGBAABCAAwFiEEg1AjqC77wbdLX2LbKSR5jcyPE6QFAmkDctUSHHBhYmVuaUBy
ZWRoYXQuY29tAAoJECkkeY3MjxOkLSQP/1jXD3wLztnTT32+AItyg+R+Sf8EJN78
hJHI1Kp9zJIdgdwnokMMTohfuSjtrXJ3HSk6RyfZnd3lD8Qo1krp4Dy9CQR0lvzb
STNvjmA4Lbs/pS2EjSeMxAkrY0JbWyQ2X0NPrp8ZilqLYyGe8/wqq7eKR9OVZjwR
XMgFlpPncPhMAfTaN6TRHy+YoxGp+ItiGWdX1n0tW2cBpOMNH5lKHz3Qp19SvumU
G9ilscybTQP7EJQKvGsa76Lwl1djFFuK4BXTn3TTIieds943/l6lG5PvSOKVqhTl
Nth6+EsbkQiC/R6rLFdzzM5D8L2ZSD2on6AX7owDF5DN02PiwamwXc+1W8KWBI73
ShiWzYJ78m4nqesWMqCgCTcjv16npW9UuemeWavumAruRpEuy6Ffa3BAKTqDiwVX
tHFms8q8NCgEQ7r3IPdbNZwx3Z/Sig/wvAzrC23jrth1/wDeaJ2nqBmQ+S4dt9HC
wGTZyKfU0oGfvKydZQJjGZM+OOB3J1EeOad/+7fxikFFQ/KDBIUzeJZdzJa0Ba64
rkKR6McggdreYFfS3jVM8iwD+bLk77JBJGd3SQhFZlkk55RYCeIRHEEhVdX1tjon
bgr1Fh0hqyVQkDmJCUpj91y2ulUmI9CJRWybtoH2w8QR90pMSgC/egAtigo1595c
LY8LDoe4NxKD
=6//1
-----END PGP SIGNATURE-----
Merge tag 'net-6.18-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from Paolo Abeni:
"Including fixes from wireless, Bluetooth and netfilter.
Current release - regressions:
- tcp: fix too slow tcp_rcvbuf_grow() action
- bluetooth: fix corruption in h4_recv_buf() after cleanup
Previous releases - regressions:
- mptcp: restore window probe
- bluetooth:
- fix connection cleanup with BIG with 2 or more BIS
- fix crash in set_mesh_sync and set_mesh_complete
- batman-adv: release references to inactive interfaces
- nic:
- ice: fix usage of logical PF id
- sfc: fix potential memory leak in efx_mae_process_mport()
Previous releases - always broken:
- devmem: refresh devmem TX dst in case of route invalidation
- netfilter: add seqadj extension for natted connections
- wifi:
- iwlwifi: fix potential use after free in iwl_mld_remove_link()
- brcmfmac: fix crash while sending action frames in standalone AP Mode
- eth:
- mlx5e: cancel tls RX async resync request in error flows
- ixgbe: fix memory leak and use-after-free in ixgbe_recovery_probe()
- hibmcge: fix rx buf avl irq is not re-enabled in irq_handle issue
- cxgb4: fix potential use-after-free in ipsec callback
- nfp: fix memory leak in nfp_net_alloc()"
* tag 'net-6.18-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (75 commits)
net: sctp: fix KMSAN uninit-value in sctp_inq_pop
net: devmem: refresh devmem TX dst in case of route invalidation
net: stmmac: est: Fix GCL bounds checks
net: stmmac: Consider Tx VLAN offload tag length for maxSDU
net: stmmac: vlan: Disable 802.1AD tag insertion offload
net/mlx5e: kTLS, Cancel RX async resync request in error flows
net: tls: Cancel RX async resync request on rcd_delta overflow
net: tls: Change async resync helpers argument
net: phy: dp83869: fix STRAP_OPMODE bitmask
selftests: net: use BASH for bareudp testing
net: mctp: Fix tx queue stall
net/mlx5: Don't zero user_count when destroying FDB tables
net: usb: asix_devices: Check return value of usbnet_get_endpoints
mptcp: zero window probe mib
mptcp: restore window probe
mptcp: fix MSG_PEEK stream corruption
mptcp: drop bogus optimization in __mptcp_check_push()
netconsole: Fix race condition in between reader and writer of userdata
Documentation: netconsole: Remove obsolete contact people
nfp: xsk: fix memory leak in nfp_net_alloc()
...
This commit is contained in:
commit e576349123
@@ -2036,6 +2036,10 @@ S: Botanicka' 68a
S: 602 00 Brno
S: Czech Republic

+ N: Karsten Keil
+ E: isdn@linux-pingi.de
+ D: ISDN subsystem maintainer
+
N: Jakob Kemi
E: jakob.kemi@telia.com
D: V4L W9966 Webcam driver
@@ -180,9 +180,9 @@ allOf:
then:
properties:
reg:
minItems: 2
maxItems: 2
reg-names:
minItems: 2
maxItems: 2
else:
properties:
reg:
@@ -605,6 +605,8 @@ operations:
reply: &pin-attrs
attributes:
- id
- module-name
- clock-id
- board-label
- panel-label
- package-label
@@ -19,9 +19,6 @@ Userdata append support by Matthew Wood <thepacketgeek@gmail.com>, Jan 22 2024

Sysdata append support by Breno Leitao <leitao@debian.org>, Jan 15 2025

- Please send bug reports to Matt Mackall <mpm@selenic.com>
- Satyam Sharma <satyam.sharma@gmail.com>, and Cong Wang <xiyou.wangcong@gmail.com>
-
Introduction:
=============
@@ -13260,10 +13260,8 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git mast
F: drivers/infiniband/ulp/isert

ISDN/CMTP OVER BLUETOOTH
- M: Karsten Keil <isdn@linux-pingi.de>
- L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
L: netdev@vger.kernel.org
- S: Odd Fixes
+ S: Orphan
W: http://www.isdn4linux.de
F: Documentation/isdn/
F: drivers/isdn/capi/
@@ -13272,10 +13270,8 @@ F: include/uapi/linux/isdn/
F: net/bluetooth/cmtp/

ISDN/mISDN SUBSYSTEM
- M: Karsten Keil <isdn@linux-pingi.de>
- L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
L: netdev@vger.kernel.org
- S: Maintained
+ S: Orphan
W: http://www.isdn4linux.de
F: drivers/isdn/Kconfig
F: drivers/isdn/Makefile
@@ -21332,6 +21328,7 @@ F: drivers/media/platform/qcom/venus/
QUALCOMM WCN36XX WIRELESS DRIVER
M: Loic Poulain <loic.poulain@oss.qualcomm.com>
L: wcn36xx@lists.infradead.org
+ L: linux-wireless@vger.kernel.org
S: Supported
W: https://wireless.wiki.kernel.org/en/users/Drivers/wcn36xx
F: drivers/net/wireless/ath/wcn36xx/
@@ -294,6 +294,8 @@ static int bcma_register_devices(struct bcma_bus *bus)
int err;

list_for_each_entry(core, &bus->cores, list) {
+ struct device_node *np;
+
/* We support that core ourselves */
switch (core->id.id) {
case BCMA_CORE_4706_CHIPCOMMON:

@@ -311,6 +313,10 @@ static int bcma_register_devices(struct bcma_bus *bus)
if (bcma_is_core_needed_early(core->id.id))
continue;

+ np = core->dev.of_node;
+ if (np && !of_device_is_available(np))
+ continue;
+
/* Only first GMAC core on BCM4706 is connected and working */
if (core->id.id == BCMA_CORE_4706_MAC_GBIT &&
core->core_unit > 0)
@@ -41,6 +41,7 @@ struct bpa10x_data {
struct usb_anchor rx_anchor;

struct sk_buff *rx_skb[2];
+ struct hci_uart hu;
};

static void bpa10x_tx_complete(struct urb *urb)

@@ -96,7 +97,7 @@ static void bpa10x_rx_complete(struct urb *urb)
if (urb->status == 0) {
bool idx = usb_pipebulk(urb->pipe);

- data->rx_skb[idx] = h4_recv_buf(hdev, data->rx_skb[idx],
+ data->rx_skb[idx] = h4_recv_buf(&data->hu, data->rx_skb[idx],
urb->transfer_buffer,
urb->actual_length,
bpa10x_recv_pkts,

@@ -388,6 +389,7 @@ static int bpa10x_probe(struct usb_interface *intf,
hci_set_drvdata(hdev, data);

data->hdev = hdev;
+ data->hu.hdev = hdev;

SET_HCIDEV_DEV(hdev, &intf->dev);
@@ -1467,11 +1467,6 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP1)
btintel_pcie_msix_gp1_handler(data);

- /* This interrupt is triggered by the firmware after updating
- * boot_stage register and image_response register
- */
- if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
- btintel_pcie_msix_gp0_handler(data);

/* For TX */
if (intr_fh & BTINTEL_PCIE_MSIX_FH_INT_CAUSES_0) {

@@ -1487,6 +1482,12 @@ static irqreturn_t btintel_pcie_irq_msix_handler(int irq, void *dev_id)
btintel_pcie_msix_tx_handle(data);
}

+ /* This interrupt is triggered by the firmware after updating
+ * boot_stage register and image_response register
+ */
+ if (intr_hw & BTINTEL_PCIE_MSIX_HW_INT_CAUSES_GP0)
+ btintel_pcie_msix_gp0_handler(data);
+
/*
* Before sending the interrupt the HW disables it to prevent a nested
* interrupt. This is done by writing 1 to the corresponding bit in
@@ -1270,6 +1270,12 @@ static void btmtksdio_reset(struct hci_dev *hdev)

sdio_claim_host(bdev->func);

+ /* set drv_pmctrl if BT is closed before doing reset */
+ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) {
+ sdio_enable_func(bdev->func);
+ btmtksdio_drv_pmctrl(bdev);
+ }
+
sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL);
skb_queue_purge(&bdev->txq);
cancel_work_sync(&bdev->txrx_work);

@@ -1285,6 +1291,12 @@ static void btmtksdio_reset(struct hci_dev *hdev)
goto err;
}

+ /* set fw_pmctrl back if BT is closed after doing reset */
+ if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) {
+ btmtksdio_fw_pmctrl(bdev);
+ sdio_disable_func(bdev->func);
+ }
+
clear_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state);
err:
sdio_release_host(bdev->func);
@@ -79,6 +79,7 @@ struct btmtkuart_dev {
u16 stp_dlen;

const struct btmtkuart_data *data;
+ struct hci_uart hu;
};

#define btmtkuart_is_standalone(bdev) \

@@ -368,7 +369,7 @@ static void btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count)
sz_left -= adv;
p_left += adv;

- bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4,
+ bdev->rx_skb = h4_recv_buf(&bdev->hu, bdev->rx_skb, p_h4,
sz_h4, mtk_recv_pkts,
ARRAY_SIZE(mtk_recv_pkts));
if (IS_ERR(bdev->rx_skb)) {

@@ -858,6 +859,7 @@ static int btmtkuart_probe(struct serdev_device *serdev)
}

bdev->hdev = hdev;
+ bdev->hu.hdev = hdev;

hdev->bus = HCI_UART;
hci_set_drvdata(hdev, bdev);
@@ -212,6 +212,7 @@ struct btnxpuart_dev {
struct ps_data psdata;
struct btnxpuart_data *nxp_data;
struct reset_control *pdn;
+ struct hci_uart hu;
};

#define NXP_V1_FW_REQ_PKT 0xa5

@@ -1756,7 +1757,7 @@ static size_t btnxpuart_receive_buf(struct serdev_device *serdev,

ps_start_timer(nxpdev);

- nxpdev->rx_skb = h4_recv_buf(nxpdev->hdev, nxpdev->rx_skb, data, count,
+ nxpdev->rx_skb = h4_recv_buf(&nxpdev->hu, nxpdev->rx_skb, data, count,
nxp_recv_pkts, ARRAY_SIZE(nxp_recv_pkts));
if (IS_ERR(nxpdev->rx_skb)) {
int err = PTR_ERR(nxpdev->rx_skb);

@@ -1875,6 +1876,7 @@ static int nxp_serdev_probe(struct serdev_device *serdev)
reset_control_deassert(nxpdev->pdn);

nxpdev->hdev = hdev;
+ nxpdev->hu.hdev = hdev;

hdev->bus = HCI_UART;
hci_set_drvdata(hdev, nxpdev);
@@ -105,7 +105,7 @@ static int ag6xx_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;

- ag6xx->rx_skb = h4_recv_buf(hu->hdev, ag6xx->rx_skb, data, count,
+ ag6xx->rx_skb = h4_recv_buf(hu, ag6xx->rx_skb, data, count,
ag6xx_recv_pkts,
ARRAY_SIZE(ag6xx_recv_pkts));
if (IS_ERR(ag6xx->rx_skb)) {
@@ -650,7 +650,7 @@ static int aml_recv(struct hci_uart *hu, const void *data, int count)
struct aml_data *aml_data = hu->priv;
int err;

- aml_data->rx_skb = h4_recv_buf(hu->hdev, aml_data->rx_skb, data, count,
+ aml_data->rx_skb = h4_recv_buf(hu, aml_data->rx_skb, data, count,
aml_recv_pkts,
ARRAY_SIZE(aml_recv_pkts));
if (IS_ERR(aml_data->rx_skb)) {
@@ -191,7 +191,7 @@ static int ath_recv(struct hci_uart *hu, const void *data, int count)
{
struct ath_struct *ath = hu->priv;

- ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count,
+ ath->rx_skb = h4_recv_buf(hu, ath->rx_skb, data, count,
ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts));
if (IS_ERR(ath->rx_skb)) {
int err = PTR_ERR(ath->rx_skb);
@@ -698,7 +698,7 @@ static int bcm_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;

- bcm->rx_skb = h4_recv_buf(hu->hdev, bcm->rx_skb, data, count,
+ bcm->rx_skb = h4_recv_buf(hu, bcm->rx_skb, data, count,
bcm_recv_pkts, ARRAY_SIZE(bcm_recv_pkts));
if (IS_ERR(bcm->rx_skb)) {
int err = PTR_ERR(bcm->rx_skb);
@@ -112,7 +112,7 @@ static int h4_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;

- h4->rx_skb = h4_recv_buf(hu->hdev, h4->rx_skb, data, count,
+ h4->rx_skb = h4_recv_buf(hu, h4->rx_skb, data, count,
h4_recv_pkts, ARRAY_SIZE(h4_recv_pkts));
if (IS_ERR(h4->rx_skb)) {
int err = PTR_ERR(h4->rx_skb);

@@ -151,12 +151,12 @@ int __exit h4_deinit(void)
return hci_uart_unregister_proto(&h4p);
}

- struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
+ struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count)
{
- struct hci_uart *hu = hci_get_drvdata(hdev);
u8 alignment = hu->alignment ? hu->alignment : 1;
+ struct hci_dev *hdev = hu->hdev;

/* Check for error from previous call */
if (IS_ERR(skb))
@@ -972,7 +972,7 @@ static int intel_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;

- intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count,
+ intel->rx_skb = h4_recv_buf(hu, intel->rx_skb, data, count,
intel_recv_pkts,
ARRAY_SIZE(intel_recv_pkts));
if (IS_ERR(intel->rx_skb)) {
@@ -429,7 +429,7 @@ static int ll_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;

- ll->rx_skb = h4_recv_buf(hu->hdev, ll->rx_skb, data, count,
+ ll->rx_skb = h4_recv_buf(hu, ll->rx_skb, data, count,
ll_recv_pkts, ARRAY_SIZE(ll_recv_pkts));
if (IS_ERR(ll->rx_skb)) {
int err = PTR_ERR(ll->rx_skb);
@@ -264,9 +264,9 @@ static int mrvl_recv(struct hci_uart *hu, const void *data, int count)
!test_bit(STATE_FW_LOADED, &mrvl->flags))
return count;

- mrvl->rx_skb = h4_recv_buf(hu->hdev, mrvl->rx_skb, data, count,
- mrvl_recv_pkts,
- ARRAY_SIZE(mrvl_recv_pkts));
+ mrvl->rx_skb = h4_recv_buf(hu, mrvl->rx_skb, data, count,
+ mrvl_recv_pkts,
+ ARRAY_SIZE(mrvl_recv_pkts));
if (IS_ERR(mrvl->rx_skb)) {
int err = PTR_ERR(mrvl->rx_skb);
bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err);
@@ -624,8 +624,8 @@ static int nokia_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;

- btdev->rx_skb = h4_recv_buf(hu->hdev, btdev->rx_skb, data, count,
- nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts));
+ btdev->rx_skb = h4_recv_buf(hu, btdev->rx_skb, data, count,
+ nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts));
if (IS_ERR(btdev->rx_skb)) {
err = PTR_ERR(btdev->rx_skb);
dev_err(dev, "Frame reassembly failed (%d)", err);
@@ -1277,7 +1277,7 @@ static int qca_recv(struct hci_uart *hu, const void *data, int count)
if (!test_bit(HCI_UART_REGISTERED, &hu->flags))
return -EUNATCH;

- qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count,
+ qca->rx_skb = h4_recv_buf(hu, qca->rx_skb, data, count,
qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts));
if (IS_ERR(qca->rx_skb)) {
int err = PTR_ERR(qca->rx_skb);
@@ -162,7 +162,7 @@ struct h4_recv_pkt {
int h4_init(void);
int h4_deinit(void);

- struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
+ struct sk_buff *h4_recv_buf(struct hci_uart *hu, struct sk_buff *skb,
const unsigned char *buffer, int count,
const struct h4_recv_pkt *pkts, int pkts_count);
#endif
@@ -1559,16 +1559,18 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
return -EMSGSIZE;
}
pin = dpll_pin_find_from_nlattr(info);
- if (!IS_ERR(pin)) {
- if (!dpll_pin_available(pin)) {
- nlmsg_free(msg);
- return -ENODEV;
- }
- ret = dpll_msg_add_pin_handle(msg, pin);
- if (ret) {
- nlmsg_free(msg);
- return ret;
- }
+ if (IS_ERR(pin)) {
+ nlmsg_free(msg);
+ return PTR_ERR(pin);
+ }
+ if (!dpll_pin_available(pin)) {
+ nlmsg_free(msg);
+ return -ENODEV;
+ }
+ ret = dpll_msg_add_pin_handle(msg, pin);
+ if (ret) {
+ nlmsg_free(msg);
+ return ret;
+ }
genlmsg_end(msg, hdr);
@@ -1735,12 +1737,14 @@ int dpll_nl_device_id_get_doit(struct sk_buff *skb, struct genl_info *info)
}

dpll = dpll_device_find_from_nlattr(info);
- if (!IS_ERR(dpll)) {
- ret = dpll_msg_add_dev_handle(msg, dpll);
- if (ret) {
- nlmsg_free(msg);
- return ret;
- }
+ if (IS_ERR(dpll)) {
+ nlmsg_free(msg);
+ return PTR_ERR(dpll);
+ }
+ ret = dpll_msg_add_dev_handle(msg, dpll);
+ if (ret) {
+ nlmsg_free(msg);
+ return ret;
+ }
genlmsg_end(msg, hdr);
@@ -1904,7 +1904,7 @@ zl3073x_dpll_pin_is_registrable(struct zl3073x_dpll *zldpll,
}

is_diff = zl3073x_out_is_diff(zldev, out);
- is_enabled = zl3073x_out_is_enabled(zldev, out);
+ is_enabled = zl3073x_output_pin_is_enabled(zldev, index);
}

/* Skip N-pin if the corresponding input/output is differential */
@@ -290,9 +290,15 @@ static int ch_ipsec_xfrm_add_state(struct net_device *dev,
return -EINVAL;
}

+ if (unlikely(!try_module_get(THIS_MODULE))) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire module reference");
+ return -ENODEV;
+ }
+
sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
if (!sa_entry) {
res = -ENOMEM;
+ module_put(THIS_MODULE);
goto out;
}

@@ -301,7 +307,6 @@ static int ch_ipsec_xfrm_add_state(struct net_device *dev,
sa_entry->esn = 1;
ch_ipsec_setkey(x, sa_entry);
x->xso.offload_handle = (unsigned long)sa_entry;
- try_module_get(THIS_MODULE);
out:
return res;
}
@@ -17,6 +17,7 @@
#define HBG_PCU_CACHE_LINE_SIZE 32
#define HBG_TX_TIMEOUT_BUF_LEN 1024
#define HBG_RX_DESCR 0x01
+ #define HBG_NO_PHY 0xFF

#define HBG_PACKET_HEAD_SIZE ((HBG_RX_SKIP1 + HBG_RX_SKIP2 + \
HBG_RX_DESCR) * HBG_PCU_CACHE_LINE_SIZE)
@@ -136,12 +136,11 @@ static pci_ers_result_t hbg_pci_err_detected(struct pci_dev *pdev,
{
struct net_device *netdev = pci_get_drvdata(pdev);

- netif_device_detach(netdev);
-
- if (state == pci_channel_io_perm_failure)
+ if (state == pci_channel_io_perm_failure) {
+ netif_device_detach(netdev);
return PCI_ERS_RESULT_DISCONNECT;
+ }

pci_disable_device(pdev);
return PCI_ERS_RESULT_NEED_RESET;
}

@@ -150,6 +149,9 @@ static pci_ers_result_t hbg_pci_err_slot_reset(struct pci_dev *pdev)
struct net_device *netdev = pci_get_drvdata(pdev);
struct hbg_priv *priv = netdev_priv(netdev);

+ netif_device_detach(netdev);
+ pci_disable_device(pdev);
+
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
"failed to re-enable PCI device after reset\n");
@@ -244,6 +244,9 @@ void hbg_hw_adjust_link(struct hbg_priv *priv, u32 speed, u32 duplex)

hbg_hw_mac_enable(priv, HBG_STATUS_ENABLE);

+ if (priv->mac.phy_addr == HBG_NO_PHY)
+ return;
+
/* wait MAC link up */
ret = readl_poll_timeout(priv->io_base + HBG_REG_AN_NEG_STATE_ADDR,
link_status,
@@ -32,6 +32,7 @@ static void hbg_irq_handle_rx_buf_val(struct hbg_priv *priv,
const struct hbg_irq_info *irq_info)
{
priv->stats.rx_fifo_less_empty_thrsld_cnt++;
+ hbg_hw_irq_enable(priv, irq_info->mask, true);
}

#define HBG_IRQ_I(name, handle) \
@@ -20,7 +20,6 @@
#define HBG_MDIO_OP_INTERVAL_US (5 * 1000)

#define HBG_NP_LINK_FAIL_RETRY_TIMES 5
- #define HBG_NO_PHY 0xFF

static void hbg_mdio_set_command(struct hbg_mac *mac, u32 cmd)
{
@@ -9429,8 +9429,7 @@ static int hclge_mii_ioctl(struct hclge_dev *hdev, struct ifreq *ifr, int cmd)
/* this command reads phy id and register at the same time */
fallthrough;
case SIOCGMIIREG:
- data->val_out = hclge_read_phy_reg(hdev, data->reg_num);
- return 0;
+ return hclge_read_phy_reg(hdev, data->reg_num, &data->val_out);

case SIOCSMIIREG:
return hclge_write_phy_reg(hdev, data->reg_num, data->val_in);
@@ -274,7 +274,7 @@ void hclge_mac_stop_phy(struct hclge_dev *hdev)
phy_stop(phydev);
}

- u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr)
+ int hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 *val)
{
struct hclge_phy_reg_cmd *req;
struct hclge_desc desc;

@@ -286,11 +286,14 @@ u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr)
req->reg_addr = cpu_to_le16(reg_addr);

ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
+ if (ret) {
dev_err(&hdev->pdev->dev,
"failed to read phy reg, ret = %d.\n", ret);
+ return ret;
+ }

- return le16_to_cpu(req->reg_val);
+ *val = le16_to_cpu(req->reg_val);
+ return 0;
}

int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val)
@@ -13,7 +13,7 @@ int hclge_mac_connect_phy(struct hnae3_handle *handle);
void hclge_mac_disconnect_phy(struct hnae3_handle *handle);
void hclge_mac_start_phy(struct hclge_dev *hdev);
void hclge_mac_stop_phy(struct hclge_dev *hdev);
- u16 hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr);
+ int hclge_read_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 *val);
int hclge_write_phy_reg(struct hclge_dev *hdev, u16 reg_addr, u16 val);

#endif
@@ -4382,6 +4382,15 @@ int ice_get_phy_lane_number(struct ice_hw *hw)
unsigned int lane;
int err;

+ /* E82X does not have sequential IDs, lane number is PF ID.
+ * For E825 device, the exception is the variant with external
+ * PHY (0x579F), in which there is also 1:1 pf_id -> lane_number
+ * mapping.
+ */
+ if (hw->mac_type == ICE_MAC_GENERIC ||
+ hw->device_id == ICE_DEV_ID_E825C_SGMII)
+ return hw->pf_id;
+
options = kcalloc(ICE_AQC_PORT_OPT_MAX, sizeof(*options), GFP_KERNEL);
if (!options)
return -ENOMEM;
@@ -6496,6 +6505,28 @@ u32 ice_get_link_speed(u16 index)
return ice_aq_to_link_speed[index];
}

+ /**
+ * ice_get_dest_cgu - get destination CGU dev for given HW
+ * @hw: pointer to the HW struct
+ *
+ * Get CGU client id for CGU register read/write operations.
+ *
+ * Return: CGU device id to use in SBQ transactions.
+ */
+ static enum ice_sbq_dev_id ice_get_dest_cgu(struct ice_hw *hw)
+ {
+ /* On dual complex E825 only complex 0 has functional CGU powering all
+ * the PHYs.
+ * SBQ destination device cgu points to CGU on a current complex and to
+ * access primary CGU from the secondary complex, the driver should use
+ * cgu_peer as a destination device.
+ */
+ if (hw->mac_type == ICE_MAC_GENERIC_3K_E825 && ice_is_dual(hw) &&
+ !ice_is_primary(hw))
+ return ice_sbq_dev_cgu_peer;
+ return ice_sbq_dev_cgu;
+ }
+
/**
* ice_read_cgu_reg - Read a CGU register
* @hw: Pointer to the HW struct
@@ -6510,8 +6541,8 @@ u32 ice_get_link_speed(u16 index)
int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val)
{
struct ice_sbq_msg_input cgu_msg = {
+ .dest_dev = ice_get_dest_cgu(hw),
.opcode = ice_sbq_msg_rd,
- .dest_dev = ice_sbq_dev_cgu,
.msg_addr_low = addr
};
int err;
@@ -6542,8 +6573,8 @@ int ice_read_cgu_reg(struct ice_hw *hw, u32 addr, u32 *val)
int ice_write_cgu_reg(struct ice_hw *hw, u32 addr, u32 val)
{
struct ice_sbq_msg_input cgu_msg = {
+ .dest_dev = ice_get_dest_cgu(hw),
.opcode = ice_sbq_msg_wr,
- .dest_dev = ice_sbq_dev_cgu,
.msg_addr_low = addr,
.data = val
};
@@ -1479,7 +1479,7 @@ static void ice_init_prof_masks(struct ice_hw *hw, enum ice_block blk)
per_pf = ICE_PROF_MASK_COUNT / hw->dev_caps.num_funcs;

hw->blk[blk].masks.count = per_pf;
- hw->blk[blk].masks.first = hw->pf_id * per_pf;
+ hw->blk[blk].masks.first = hw->logical_pf_id * per_pf;

memset(hw->blk[blk].masks.masks, 0, sizeof(hw->blk[blk].masks.masks));
@@ -50,6 +50,7 @@ enum ice_sbq_dev_id {
ice_sbq_dev_phy_0 = 0x02,
ice_sbq_dev_cgu = 0x06,
ice_sbq_dev_phy_0_peer = 0x0D,
+ ice_sbq_dev_cgu_peer = 0x0F,
};

enum ice_sbq_msg_opcode {
@@ -2281,7 +2281,7 @@ static int igb_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_PRIV_FLAGS:
return IGB_PRIV_FLAGS_STR_LEN;
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
}
@@ -810,7 +810,7 @@ static int igc_ethtool_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_PRIV_FLAGS:
return IGC_PRIV_FLAGS_STR_LEN;
default:
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
}
@@ -2094,6 +2094,9 @@ static void igc_ethtool_diag_test(struct net_device *netdev,
netdev_info(adapter->netdev, "Offline testing starting");
set_bit(__IGC_TESTING, &adapter->state);

+ /* power up PHY for link test */
+ igc_power_up_phy_copper(&adapter->hw);
+
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result
*/
@@ -11507,10 +11507,10 @@ static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter)
shutdown_aci:
mutex_destroy(&adapter->hw.aci.lock);
ixgbe_release_hw_control(adapter);
+ devlink_free(adapter->devlink);
clean_up_probe:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
- devlink_free(adapter->devlink);
pci_release_mem_regions(pdev);
if (disable_dev)
pci_disable_device(pdev);
@@ -641,7 +641,7 @@ static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp,
* disabled
*/
if (rq->type != PTP_CLK_REQ_PPS || !adapter->ptp_setup_sdp)
- return -ENOTSUPP;
+ return -EOPNOTSUPP;

if (on)
adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED;
@@ -320,7 +320,6 @@ err_dma_unmap:
err_free:
kfree(buf);
err_out:
- priv_rx->rq_stats->tls_resync_req_skip++;
return err;
}
@@ -339,14 +338,19 @@ static void resync_handle_work(struct work_struct *work)

if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
mlx5e_ktls_priv_rx_put(priv_rx);
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(&resync->core);
return;
}

c = resync->priv->channels.c[priv_rx->rxq];
sq = &c->async_icosq;

- if (resync_post_get_progress_params(sq, priv_rx))
+ if (resync_post_get_progress_params(sq, priv_rx)) {
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(&resync->core);
mlx5e_ktls_priv_rx_put(priv_rx);
+ }
}

static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync,
@@ -425,14 +429,21 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
{
struct mlx5e_ktls_rx_resync_buf *buf = wi->tls_get_params.buf;
struct mlx5e_ktls_offload_context_rx *priv_rx;
+ struct tls_offload_resync_async *async_resync;
+ struct tls_offload_context_rx *rx_ctx;
u8 tracker_state, auth_state, *ctx;
struct device *dev;
u32 hw_seq;

priv_rx = buf->priv_rx;
dev = mlx5_core_dma_dev(sq->channel->mdev);
- if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
+ rx_ctx = tls_offload_ctx_rx(tls_get_ctx(priv_rx->sk));
+ async_resync = rx_ctx->resync_async;
+ if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) {
+ priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(async_resync);
goto out;
+ }

dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE,
DMA_FROM_DEVICE);
@@ -443,11 +454,13 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
if (tracker_state != MLX5E_TLS_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING ||
auth_state != MLX5E_TLS_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD) {
priv_rx->rq_stats->tls_resync_req_skip++;
+ tls_offload_rx_resync_async_request_cancel(async_resync);
goto out;
}

hw_seq = MLX5_GET(tls_progress_params, ctx, hw_resync_tcp_sn);
- tls_offload_rx_resync_async_request_end(priv_rx->sk, cpu_to_be32(hw_seq));
+ tls_offload_rx_resync_async_request_end(async_resync,
+ cpu_to_be32(hw_seq));
priv_rx->rq_stats->tls_resync_req_end++;
out:
mlx5e_ktls_priv_rx_put(priv_rx);
@@ -472,8 +485,10 @@ static bool resync_queue_get_psv(struct sock *sk)

resync = &priv_rx->resync;
mlx5e_ktls_priv_rx_get(priv_rx);
- if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work)))
+ if (unlikely(!queue_work(resync->priv->tls->rx_wq, &resync->work))) {
+ mlx5e_ktls_priv_rx_put(priv_rx);
return false;
+ }

return true;
}
@@ -482,6 +497,7 @@ static bool resync_queue_get_psv(struct sock *sk)

static void resync_update_sn(struct mlx5e_rq *rq, struct sk_buff *skb)
{
struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ struct tls_offload_resync_async *resync_async;
struct net_device *netdev = rq->netdev;
struct net *net = dev_net(netdev);
struct sock *sk = NULL;
||||
|
||||
seq = th->seq;
|
||||
datalen = skb->len - depth;
|
||||
tls_offload_rx_resync_async_request_start(sk, seq, datalen);
|
||||
resync_async = tls_offload_ctx_rx(tls_get_ctx(sk))->resync_async;
|
||||
tls_offload_rx_resync_async_request_start(resync_async, seq, datalen);
|
||||
rq->stats->tls_resync_req_start++;
|
||||
|
||||
unref:
|
||||
@ -556,6 +573,18 @@ void mlx5e_ktls_rx_resync(struct net_device *netdev, struct sock *sk,
|
||||
resync_handle_seq_match(priv_rx, c);
|
||||
}
|
||||
|
||||
void
|
||||
mlx5e_ktls_rx_resync_async_request_cancel(struct mlx5e_icosq_wqe_info *wi)
|
||||
{
|
||||
struct mlx5e_ktls_offload_context_rx *priv_rx;
|
||||
struct mlx5e_ktls_rx_resync_buf *buf;
|
||||
|
||||
buf = wi->tls_get_params.buf;
|
||||
priv_rx = buf->priv_rx;
|
||||
priv_rx->rq_stats->tls_resync_req_skip++;
|
||||
tls_offload_rx_resync_async_request_cancel(&priv_rx->resync.core);
|
||||
}
|
||||
|
||||
/* End of resync section */
|
||||
|
||||
void mlx5e_ktls_handle_rx_skb(struct mlx5e_rq *rq, struct sk_buff *skb,
|
||||
|
||||
@ -29,6 +29,10 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
|
||||
void mlx5e_ktls_tx_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
|
||||
struct mlx5e_tx_wqe_info *wi,
|
||||
u32 *dma_fifo_cc);
|
||||
|
||||
void
|
||||
mlx5e_ktls_rx_resync_async_request_cancel(struct mlx5e_icosq_wqe_info *wi);
|
||||
|
||||
static inline bool
|
||||
mlx5e_ktls_tx_try_handle_resync_dump_comp(struct mlx5e_txqsq *sq,
|
||||
struct mlx5e_tx_wqe_info *wi,
|
||||
|
||||
@@ -1036,6 +1036,10 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
netdev_WARN_ONCE(cq->netdev,
"Bad OP in ICOSQ CQE: 0x%x\n",
get_cqe_opcode(cqe));
+ #ifdef CONFIG_MLX5_EN_TLS
+ if (wi->wqe_type == MLX5E_ICOSQ_WQE_GET_PSV_TLS)
+ mlx5e_ktls_rx_resync_async_request_cancel(wi);
+ #endif
mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
(struct mlx5_err_cqe *)cqe);
mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
@@ -66,7 +66,6 @@ static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
esw->fdb_table.legacy.addr_grp = NULL;
esw->fdb_table.legacy.allmulti_grp = NULL;
esw->fdb_table.legacy.promisc_grp = NULL;
- atomic64_set(&esw->user_count, 0);
}

static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
@@ -1978,7 +1978,6 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
/* Holds true only as long as DMFS is the default */
mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
MLX5_FLOW_STEERING_MODE_DMFS);
- atomic64_set(&esw->user_count, 0);
}

static int esw_get_nr_ft_offloads_steering_src_ports(struct mlx5_eswitch *esw)
@@ -2557,14 +2557,16 @@ nfp_net_alloc(struct pci_dev *pdev, const struct nfp_dev_info *dev_info,
err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar,
&nn->tlv_caps);
if (err)
- goto err_free_nn;
+ goto err_free_xsk_pools;

err = nfp_ccm_mbox_alloc(nn);
if (err)
- goto err_free_nn;
+ goto err_free_xsk_pools;

return nn;

+ err_free_xsk_pools:
+ kfree(nn->dp.xsk_pools);
err_free_nn:
if (nn->dp.netdev)
free_netdev(nn->dp.netdev);
@@ -1090,6 +1090,9 @@ void efx_mae_remove_mport(void *desc, void *arg)
kfree(mport);
}

+ /*
+ * Takes ownership of @desc, even if it returns an error
+ */
static int efx_mae_process_mport(struct efx_nic *efx,
struct mae_mport_desc *desc)
{

@@ -1100,6 +1103,7 @@ static int efx_mae_process_mport(struct efx_nic *efx,
if (!IS_ERR_OR_NULL(mport)) {
netif_err(efx, drv, efx->net_dev,
"mport with id %u does exist!!!\n", desc->mport_id);
+ kfree(desc);
return -EEXIST;
}
@@ -4089,18 +4089,11 @@ static int stmmac_release(struct net_device *dev)
static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
struct stmmac_tx_queue *tx_q)
{
- u16 tag = 0x0, inner_tag = 0x0;
- u32 inner_type = 0x0;
struct dma_desc *p;
+ u16 tag = 0x0;

- if (!priv->dma_cap.vlins)
+ if (!priv->dma_cap.vlins || !skb_vlan_tag_present(skb))
return false;
- if (!skb_vlan_tag_present(skb))
- return false;
- if (skb->vlan_proto == htons(ETH_P_8021AD)) {
- inner_tag = skb_vlan_tag_get(skb);
- inner_type = STMMAC_VLAN_INSERT;
- }

tag = skb_vlan_tag_get(skb);

@@ -4109,7 +4102,7 @@ static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
else
p = &tx_q->dma_tx[tx_q->cur_tx];

- if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
+ if (stmmac_set_desc_vlan_tag(priv, p, tag, 0x0, 0x0))
return false;

stmmac_set_tx_owner(priv, p);
@@ -4507,6 +4500,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
bool has_vlan, set_ic;
int entry, first_tx;
dma_addr_t des;
+ u32 sdu_len;

tx_q = &priv->dma_conf.tx_queue[queue];
txq_stats = &priv->xstats.txq_stats[queue];
@@ -4524,10 +4518,15 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
}

if (priv->est && priv->est->enable &&
- priv->est->max_sdu[queue] &&
- skb->len > priv->est->max_sdu[queue]){
- priv->xstats.max_sdu_txq_drop[queue]++;
- goto max_sdu_err;
+ priv->est->max_sdu[queue]) {
+ sdu_len = skb->len;
+ /* Add VLAN tag length if VLAN tag insertion offload is requested */
+ if (priv->dma_cap.vlins && skb_vlan_tag_present(skb))
+ sdu_len += VLAN_HLEN;
+ if (sdu_len > priv->est->max_sdu[queue]) {
+ priv->xstats.max_sdu_txq_drop[queue]++;
+ goto max_sdu_err;
+ }
}

if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@ -7573,11 +7572,8 @@ int stmmac_dvr_probe(struct device *device,
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
}
- if (priv->dma_cap.vlins) {
+ if (priv->dma_cap.vlins)
ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
- if (priv->dma_cap.dvlan)
- ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
- }
#endif
priv->msg_enable = netif_msg_init(debug, default_msg_level);
@@ -981,7 +981,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
if (qopt->cmd == TAPRIO_CMD_DESTROY)
goto disable;

- if (qopt->num_entries >= dep)
+ if (qopt->num_entries > dep)
return -EINVAL;
if (!qopt->cycle_time)
return -ERANGE;

@@ -1012,7 +1012,7 @@ static int tc_taprio_configure(struct stmmac_priv *priv,
s64 delta_ns = qopt->entries[i].interval;
u32 gates = qopt->entries[i].gate_mask;

- if (delta_ns > GENMASK(wid, 0))
+ if (delta_ns > GENMASK(wid - 1, 0))
return -ERANGE;
if (gates > GENMASK(31 - wid, 0))
return -ERANGE;
@@ -212,7 +212,7 @@ static void vlan_enable(struct mac_device_info *hw, u32 type)

value = readl(ioaddr + VLAN_INCL);
value |= VLAN_VLTI;
- value |= VLAN_CSVL; /* Only use SVLAN */
+ value &= ~VLAN_CSVL; /* Only use CVLAN */
value &= ~VLAN_VLC;
value |= (type << VLAN_VLC_SHIFT) & VLAN_VLC;
writel(value, ioaddr + VLAN_INCL);
@@ -96,11 +96,13 @@ static netdev_tx_t mctp_usb_start_xmit(struct sk_buff *skb,
skb->data, skb->len,
mctp_usb_out_complete, skb);

+ /* Stops TX queue first to prevent race condition with URB complete */
+ netif_stop_queue(dev);
rc = usb_submit_urb(urb, GFP_ATOMIC);
- if (rc)
+ if (rc) {
+ netif_wake_queue(dev);
goto err_drop;
- else
- netif_stop_queue(dev);
+ }

return NETDEV_TX_OK;
@@ -886,8 +886,11 @@ static ssize_t userdatum_value_show(struct config_item *item, char *buf)

static void update_userdata(struct netconsole_target *nt)
{
- int complete_idx = 0, child_count = 0;
struct list_head *entry;
+ int child_count = 0;
+ unsigned long flags;

+ spin_lock_irqsave(&target_list_lock, flags);
+
/* Clear the current string in case the last userdatum was deleted */
nt->userdata_length = 0;

@@ -897,8 +900,11 @@ static void update_userdata(struct netconsole_target *nt)
struct userdatum *udm_item;
struct config_item *item;

- if (WARN_ON_ONCE(child_count >= MAX_EXTRADATA_ITEMS))
- break;
+ if (child_count >= MAX_EXTRADATA_ITEMS) {
+ spin_unlock_irqrestore(&target_list_lock, flags);
+ WARN_ON_ONCE(1);
+ return;
+ }
child_count++;

item = container_of(entry, struct config_item, ci_entry);

@@ -912,12 +918,11 @@ static void update_userdata(struct netconsole_target *nt)
* one entry length (1/MAX_EXTRADATA_ITEMS long), entry count is
* checked to not exceed MAX items with child_count above
*/
- complete_idx += scnprintf(&nt->extradata_complete[complete_idx],
- MAX_EXTRADATA_ENTRY_LEN, " %s=%s\n",
- item->ci_name, udm_item->value);
+ nt->userdata_length += scnprintf(&nt->extradata_complete[nt->userdata_length],
+ MAX_EXTRADATA_ENTRY_LEN, " %s=%s\n",
+ item->ci_name, udm_item->value);
}
- nt->userdata_length = strnlen(nt->extradata_complete,
- sizeof(nt->extradata_complete));
+ spin_unlock_irqrestore(&target_list_lock, flags);
}

static ssize_t userdatum_value_store(struct config_item *item, const char *buf,
@@ -738,6 +738,12 @@ static int dp83867_config_init(struct phy_device *phydev)
return ret;
}

+ /* Although the DP83867 reports EEE capability through the
+ * MDIO_PCS_EEE_ABLE and MDIO_AN_EEE_ADV registers, the feature
+ * is not actually implemented in hardware.
+ */
+ phy_disable_eee(phydev);
+
if (phy_interface_is_rgmii(phydev) ||
phydev->interface == PHY_INTERFACE_MODE_SGMII) {
val = phy_read(phydev, MII_DP83867_PHYCTRL);
@@ -84,7 +84,7 @@
#define DP83869_CLK_DELAY_DEF 7

/* STRAP_STS1 bits */
- #define DP83869_STRAP_OP_MODE_MASK GENMASK(2, 0)
+ #define DP83869_STRAP_OP_MODE_MASK GENMASK(11, 9)
#define DP83869_STRAP_STS1_RESERVED BIT(11)
#define DP83869_STRAP_MIRROR_ENABLED BIT(12)

@@ -528,7 +528,7 @@ static int dp83869_set_strapped_mode(struct phy_device *phydev)
if (val < 0)
return val;

- dp83869->mode = val & DP83869_STRAP_OP_MODE_MASK;
+ dp83869->mode = FIELD_GET(DP83869_STRAP_OP_MODE_MASK, val);

return 0;
}
@@ -230,7 +230,9 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf)
int i;
unsigned long gpio_bits = dev->driver_info->data;

- usbnet_get_endpoints(dev,intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ goto out;

/* Toggle the GPIOs in a manufacturer/model specific way */
for (i = 2; i >= 0; i--) {

@@ -848,7 +850,9 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)

dev->driver_priv = priv;

- usbnet_get_endpoints(dev, intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ return ret;

/* Maybe the boot loader passed the MAC address via device tree */
if (!eth_platform_get_mac_address(&dev->udev->dev, buf)) {

@@ -1281,7 +1285,9 @@ static int ax88178_bind(struct usbnet *dev, struct usb_interface *intf)
int ret;
u8 buf[ETH_ALEN] = {0};

- usbnet_get_endpoints(dev,intf);
+ ret = usbnet_get_endpoints(dev, intf);
+ if (ret)
+ return ret;

/* Get the MAC address */
ret = asix_read_cmd(dev, AX_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, buf, 0);
@@ -1659,6 +1659,8 @@ void usbnet_disconnect (struct usb_interface *intf)
net = dev->net;
unregister_netdev (net);

+ cancel_work_sync(&dev->kevent);
+
while ((urb = usb_get_from_anchor(&dev->deferred))) {
dev_kfree_skb(urb->context);
kfree(urb->sg);
@@ -1379,9 +1379,14 @@ static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct
ret = XDP_PASS;
rcu_read_lock();
prog = rcu_dereference(rq->xdp_prog);
- /* TODO: support multi buffer. */
- if (prog && num_buf == 1)
- ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats);
+ if (prog) {
+ /* TODO: support multi buffer. */
+ if (num_buf == 1)
+ ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit,
+ stats);
+ else
+ ret = XDP_ABORTED;
+ }
rcu_read_unlock();

switch (ret) {
@@ -1937,6 +1937,7 @@ int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id)
if (cmd_id == WMI_CMD_UNSUPPORTED) {
ath10k_warn(ar, "wmi command %d is not supported by firmware\n",
cmd_id);
+ dev_kfree_skb_any(skb);
return ret;
}
@@ -912,42 +912,84 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
static const struct dmi_system_id ath11k_pm_quirk_table[] = {
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* X13 G4 AMD #1 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21J3"),
},
},
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* X13 G4 AMD #2 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21J4"),
+ },
+ },
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* T14 G4 AMD #1 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21K3"),
},
},
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T14 G4 AMD #2 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K4"),
+ },
+ },
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* P14s G4 AMD #1 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21K5"),
},
},
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* P14s G4 AMD #2 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K6"),
+ },
+ },
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* T16 G2 AMD #1 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21K7"),
},
},
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T16 G2 AMD #2 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21K8"),
+ },
+ },
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* P16s G2 AMD #1 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21K9"),
},
},
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* P16s G2 AMD #2 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21KA"),
+ },
+ },
{
.driver_data = (void *)ATH11K_PM_WOW,
- .matches = {
+ .matches = { /* T14s G4 AMD #1 */
DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_NAME, "21F8"),
},
},
+ {
+ .driver_data = (void *)ATH11K_PM_WOW,
+ .matches = { /* T14s G4 AMD #2 */
+ DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "21F9"),
+ },
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
*/

#include <net/mac80211.h>
@@ -4417,9 +4417,9 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}

if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
- flags |= WMI_KEY_PAIRWISE;
+ flags = WMI_KEY_PAIRWISE;
else
- flags |= WMI_KEY_GROUP;
+ flags = WMI_KEY_GROUP;

ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"%s for peer %pM on vdev %d flags 0x%X, type = %d, num_sta %d\n",

@@ -4456,7 +4456,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,

is_ap_with_no_sta = (vif->type == NL80211_IFTYPE_AP &&
!arvif->num_stations);
- if ((flags & WMI_KEY_PAIRWISE) || cmd == SET_KEY || is_ap_with_no_sta) {
+ if (flags == WMI_KEY_PAIRWISE || cmd == SET_KEY || is_ap_with_no_sta) {
ret = ath11k_install_key(arvif, key, cmd, peer_addr, flags);
if (ret) {
ath11k_warn(ab, "ath11k_install_key failed (%d)\n", ret);

@@ -4470,7 +4470,7 @@ static int ath11k_mac_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
goto exit;
}

- if ((flags & WMI_KEY_GROUP) && cmd == SET_KEY && is_ap_with_no_sta)
+ if (flags == WMI_KEY_GROUP && cmd == SET_KEY && is_ap_with_no_sta)
arvif->reinstall_group_keys = true;
}
@@ -8290,23 +8290,32 @@ static void ath12k_mgmt_over_wmi_tx_drop(struct ath12k *ar, struct sk_buff *skb)
wake_up(&ar->txmgmt_empty_waitq);
}

- int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
+ static void ath12k_mac_tx_mgmt_free(struct ath12k *ar, int buf_id)
{
- struct sk_buff *msdu = skb;
+ struct sk_buff *msdu;
struct ieee80211_tx_info *info;
- struct ath12k *ar = ctx;
- struct ath12k_base *ab = ar->ab;

spin_lock_bh(&ar->txmgmt_idr_lock);
- idr_remove(&ar->txmgmt_idr, buf_id);
+ msdu = idr_remove(&ar->txmgmt_idr, buf_id);
spin_unlock_bh(&ar->txmgmt_idr_lock);
- dma_unmap_single(ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len,
+
+ if (!msdu)
+ return;
+
+ dma_unmap_single(ar->ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len,
DMA_TO_DEVICE);

info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));

- ath12k_mgmt_over_wmi_tx_drop(ar, skb);
+ ath12k_mgmt_over_wmi_tx_drop(ar, msdu);
}

+ int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
+ {
+ struct ath12k *ar = ctx;
+
+ ath12k_mac_tx_mgmt_free(ar, buf_id);
+
return 0;
}

@@ -8315,17 +8324,10 @@ static int ath12k_mac_vif_txmgmt_idr_remove(int buf_id, void *skb, void *ctx)
{
struct ieee80211_vif *vif = ctx;
struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
- struct sk_buff *msdu = skb;
struct ath12k *ar = skb_cb->ar;
- struct ath12k_base *ab = ar->ab;

- if (skb_cb->vif == vif) {
- spin_lock_bh(&ar->txmgmt_idr_lock);
- idr_remove(&ar->txmgmt_idr, buf_id);
- spin_unlock_bh(&ar->txmgmt_idr_lock);
- dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len,
- DMA_TO_DEVICE);
- }
+ if (skb_cb->vif == vif)
+ ath12k_mac_tx_mgmt_free(ar, buf_id);

return 0;
}
@@ -5627,8 +5627,7 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
*cookie, le16_to_cpu(action_frame->len),
le32_to_cpu(af_params->channel));

- ack = brcmf_p2p_send_action_frame(cfg, cfg_to_ndev(cfg),
- af_params);
+ ack = brcmf_p2p_send_action_frame(vif->ifp, af_params);

cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, ack,
GFP_KERNEL);
@@ -1529,6 +1529,7 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
/**
* brcmf_p2p_tx_action_frame() - send action frame over fil.
*
+ * @ifp: interface to transmit on.
* @p2p: p2p info struct for vif.
* @af_params: action frame data/info.
*
@@ -1538,12 +1539,11 @@ int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
* The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action
* frame is transmitted.
*/
- static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
+ static s32 brcmf_p2p_tx_action_frame(struct brcmf_if *ifp,
+ struct brcmf_p2p_info *p2p,
struct brcmf_fil_af_params_le *af_params)
{
struct brcmf_pub *drvr = p2p->cfg->pub;
- struct brcmf_cfg80211_vif *vif;
- struct brcmf_p2p_action_frame *p2p_af;
s32 err = 0;

brcmf_dbg(TRACE, "Enter\n");
@@ -1552,14 +1552,7 @@ static s32 brcmf_p2p_tx_action_frame(struct brcmf_p2p_info *p2p,
clear_bit(BRCMF_P2P_STATUS_ACTION_TX_COMPLETED, &p2p->status);
clear_bit(BRCMF_P2P_STATUS_ACTION_TX_NOACK, &p2p->status);

- /* check if it is a p2p_presence response */
- p2p_af = (struct brcmf_p2p_action_frame *)af_params->action_frame.data;
- if (p2p_af->subtype == P2P_AF_PRESENCE_RSP)
- vif = p2p->bss_idx[P2PAPI_BSSCFG_CONNECTION].vif;
- else
- vif = p2p->bss_idx[P2PAPI_BSSCFG_DEVICE].vif;
-
- err = brcmf_fil_bsscfg_data_set(vif->ifp, "actframe", af_params,
+ err = brcmf_fil_bsscfg_data_set(ifp, "actframe", af_params,
sizeof(*af_params));
if (err) {
bphy_err(drvr, " sending action frame has failed\n");
@@ -1711,16 +1704,14 @@ static bool brcmf_p2p_check_dwell_overflow(u32 requested_dwell,
/**
* brcmf_p2p_send_action_frame() - send action frame .
*
- * @cfg: driver private data for cfg80211 interface.
- * @ndev: net device to transmit on.
+ * @ifp: interface to transmit on.
* @af_params: configuration data for action frame.
*/
- bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
- struct net_device *ndev,
+ bool brcmf_p2p_send_action_frame(struct brcmf_if *ifp,
struct brcmf_fil_af_params_le *af_params)
{
+ struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
struct brcmf_p2p_info *p2p = &cfg->p2p;
- struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_fil_action_frame_le *action_frame;
struct brcmf_config_af_params config_af_params;
struct afx_hdl *afx_hdl = &p2p->afx_hdl;
@@ -1857,7 +1848,7 @@ bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
if (af_params->channel)
msleep(P2P_AF_RETRY_DELAY_TIME);

- ack = !brcmf_p2p_tx_action_frame(p2p, af_params);
+ ack = !brcmf_p2p_tx_action_frame(ifp, p2p, af_params);
tx_retry++;
dwell_overflow = brcmf_p2p_check_dwell_overflow(requested_dwell,
dwell_jiffies);
@@ -2217,7 +2208,6 @@ static struct wireless_dev *brcmf_p2p_create_p2pdev(struct brcmf_p2p_info *p2p,

WARN_ON(p2p_ifp->bsscfgidx != bsscfgidx);

- init_completion(&p2p->send_af_done);
INIT_WORK(&p2p->afx_hdl.afx_work, brcmf_p2p_afx_handler);
init_completion(&p2p->afx_hdl.act_frm_scan);
init_completion(&p2p->wait_next_af);
@@ -2513,6 +2503,8 @@ s32 brcmf_p2p_attach(struct brcmf_cfg80211_info *cfg, bool p2pdev_forced)
pri_ifp = brcmf_get_ifp(cfg->pub, 0);
p2p->bss_idx[P2PAPI_BSSCFG_PRIMARY].vif = pri_ifp->vif;

+ init_completion(&p2p->send_af_done);
+
if (p2pdev_forced) {
err_ptr = brcmf_p2p_create_p2pdev(p2p, NULL, NULL);
if (IS_ERR(err_ptr)) {
@ -168,8 +168,7 @@ int brcmf_p2p_notify_action_frame_rx(struct brcmf_if *ifp,
|
||||
int brcmf_p2p_notify_action_tx_complete(struct brcmf_if *ifp,
|
||||
const struct brcmf_event_msg *e,
|
||||
void *data);
|
||||
bool brcmf_p2p_send_action_frame(struct brcmf_cfg80211_info *cfg,
|
||||
struct net_device *ndev,
|
||||
bool brcmf_p2p_send_action_frame(struct brcmf_if *ifp,
|
||||
struct brcmf_fil_af_params_le *af_params);
|
||||
bool brcmf_p2p_scan_finding_common_channel(struct brcmf_cfg80211_info *cfg,
|
||||
struct brcmf_bss_info_le *bi);
|
||||
|
||||
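The brcmfmac hunks above stop deriving the transmit interface from the P2P bsscfg table, whose DEVICE/CONNECTION vifs are not populated when the chip runs as a plain AP, and instead pass the caller's ifp straight through. A standalone sketch of why "look it up from role state" crashes while "pass the handle down" cannot; all names here are illustrative, not the driver's:

#include <stdio.h>
#include <stddef.h>

struct vif { int ifidx; };

/* Hypothetical registry: slots are only filled when P2P roles exist. */
static struct vif *bss_vifs[2]; /* [0] = device, [1] = connection */

/* Old shape: derive the target from global role state; NULL in AP mode. */
static int tx_via_lookup(int presence_rsp)
{
    struct vif *v = bss_vifs[presence_rsp ? 1 : 0];

    if (!v)
        return -1;              /* in the kernel this was a NULL deref */
    return v->ifidx;
}

/* New shape: the caller already knows the interface; pass it down. */
static int tx_via_caller(struct vif *v)
{
    return v->ifidx;
}

int main(void)
{
    struct vif ap = { .ifidx = 7 }; /* AP interface, no P2P vifs set up */

    printf("lookup: %d\n", tx_via_lookup(0));   /* -1: no P2P device vif */
    printf("caller: %d\n", tx_via_caller(&ap)); /* 7: always valid */
    return 0;
}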
@@ -501,6 +501,7 @@ void iwl_mld_remove_link(struct iwl_mld *mld,
     struct iwl_mld_vif *mld_vif = iwl_mld_vif_from_mac80211(bss_conf->vif);
     struct iwl_mld_link *link = iwl_mld_link_from_mac80211(bss_conf);
     bool is_deflink = link == &mld_vif->deflink;
+    u8 fw_id = link->fw_id;

     if (WARN_ON(!link || link->active))
         return;
@@ -513,10 +514,10 @@ void iwl_mld_remove_link(struct iwl_mld *mld,

     RCU_INIT_POINTER(mld_vif->link[bss_conf->link_id], NULL);

-    if (WARN_ON(link->fw_id >= mld->fw->ucode_capa.num_links))
+    if (WARN_ON(fw_id >= mld->fw->ucode_capa.num_links))
         return;

-    RCU_INIT_POINTER(mld->fw_id_to_bss_conf[link->fw_id], NULL);
+    RCU_INIT_POINTER(mld->fw_id_to_bss_conf[fw_id], NULL);
 }

 void iwl_mld_handle_missed_beacon_notif(struct iwl_mld *mld,
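The iwl_mld hunks read link->fw_id into a local once, before the RCU pointers that keep the link reachable are cleared, and use the cached copy for the bounds check and table cleanup afterwards. The shape of the fix, modeled in plain C with "unpublish then free" simulated by a table and malloc:

#include <stdio.h>
#include <stdlib.h>

struct link { unsigned char fw_id; };

static struct link *table[4];

int main(void)
{
    struct link *l = malloc(sizeof(*l));

    l->fw_id = 2;
    table[l->fw_id] = l;

    /* Fix pattern: read everything needed from the object first... */
    unsigned char fw_id = l->fw_id;

    /* ...then unpublish and free it... */
    table[fw_id] = NULL;
    free(l);

    /* ...and only touch the cached copy afterwards. */
    printf("cleared slot %u\n", fw_id);
    return 0;
}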
@@ -434,6 +434,7 @@ enum {
     HCI_USER_CHANNEL,
     HCI_EXT_CONFIGURED,
     HCI_LE_ADV,
+    HCI_LE_ADV_0,
     HCI_LE_PER_ADV,
     HCI_LE_SCAN,
     HCI_SSP_ENABLED,
@@ -244,6 +244,7 @@ struct adv_info {
     bool enabled;
     bool pending;
     bool periodic;
+    bool periodic_enabled;
     __u8 mesh;
     __u8 instance;
     __u8 handle;
@@ -38,8 +38,8 @@
 #define L2CAP_DEFAULT_TX_WINDOW     63
 #define L2CAP_DEFAULT_EXT_WINDOW    0x3FFF
 #define L2CAP_DEFAULT_MAX_TX        3
-#define L2CAP_DEFAULT_RETRANS_TO    2     /* seconds */
-#define L2CAP_DEFAULT_MONITOR_TO    12    /* seconds */
+#define L2CAP_DEFAULT_RETRANS_TO    2000  /* 2 seconds */
+#define L2CAP_DEFAULT_MONITOR_TO    12000 /* 12 seconds */
 #define L2CAP_DEFAULT_MAX_PDU_SIZE  1492  /* Sized for AMP packet */
 #define L2CAP_DEFAULT_ACK_TO        200
 #define L2CAP_DEFAULT_MAX_SDU_SIZE  0xFFFF
@@ -853,7 +853,7 @@ struct mgmt_cp_set_mesh {
     __le16 window;
     __le16 period;
     __u8 num_ad_types;
-    __u8 ad_types[];
+    __u8 ad_types[] __counted_by(num_ad_types);
 } __packed;
 #define MGMT_SET_MESH_RECEIVER_SIZE 6
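__counted_by(num_ad_types) tells the compiler and the kernel's bounds-checking machinery which member holds the element count of the flexible array, so out-of-range accesses can be trapped. A toy version; the empty fallback mirrors how the kernel defines the macro on compilers that lack the attribute:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#if !defined(__has_attribute)
#define __has_attribute(x) 0
#endif
#if __has_attribute(counted_by)
#define __counted_by(m) __attribute__((counted_by(m)))
#else
#define __counted_by(m)
#endif

struct mesh_filter {
    unsigned char num_ad_types;
    unsigned char ad_types[] __counted_by(num_ad_types);
};

int main(void)
{
    unsigned char n = 3;
    struct mesh_filter *f = malloc(sizeof(*f) + n);

    f->num_ad_types = n;            /* set the count before the array */
    memset(f->ad_types, 0x16, n);
    printf("%u ad types\n", f->num_ad_types);
    free(f);
    return 0;
}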
@@ -370,7 +370,7 @@ void tcp_delack_timer_handler(struct sock *sk);
 int tcp_ioctl(struct sock *sk, int cmd, int *karg);
 enum skb_drop_reason tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
 void tcp_rcv_established(struct sock *sk, struct sk_buff *skb);
-void tcp_rcvbuf_grow(struct sock *sk);
+void tcp_rcvbuf_grow(struct sock *sk, u32 newval);
 void tcp_rcv_space_adjust(struct sock *sk);
 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
 void tcp_twsk_destructor(struct sock *sk);
@@ -451,25 +451,26 @@ static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)

 /* Log all TLS record header TCP sequences in [seq, seq+len] */
 static inline void
-tls_offload_rx_resync_async_request_start(struct sock *sk, __be32 seq, u16 len)
+tls_offload_rx_resync_async_request_start(struct tls_offload_resync_async *resync_async,
+                                          __be32 seq, u16 len)
 {
-    struct tls_context *tls_ctx = tls_get_ctx(sk);
-    struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
-
-    atomic64_set(&rx_ctx->resync_async->req, ((u64)ntohl(seq) << 32) |
+    atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) |
                  ((u64)len << 16) | RESYNC_REQ | RESYNC_REQ_ASYNC);
-    rx_ctx->resync_async->loglen = 0;
-    rx_ctx->resync_async->rcd_delta = 0;
+    resync_async->loglen = 0;
+    resync_async->rcd_delta = 0;
 }

 static inline void
-tls_offload_rx_resync_async_request_end(struct sock *sk, __be32 seq)
+tls_offload_rx_resync_async_request_end(struct tls_offload_resync_async *resync_async,
+                                        __be32 seq)
 {
-    struct tls_context *tls_ctx = tls_get_ctx(sk);
-    struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
-
-    atomic64_set(&rx_ctx->resync_async->req,
-                 ((u64)ntohl(seq) << 32) | RESYNC_REQ);
+    atomic64_set(&resync_async->req, ((u64)ntohl(seq) << 32) | RESYNC_REQ);
+}
+
+static inline void
+tls_offload_rx_resync_async_request_cancel(struct tls_offload_resync_async *resync_async)
+{
+    atomic64_set(&resync_async->req, 0);
 }

 static inline void
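All three tls.h helpers pack the resync state into one 64-bit word that is read and written atomically: the TCP sequence in the top 32 bits, the record length in bits 16-31, and request flags at the bottom; the new _cancel() helper simply stores 0. The packing arithmetic, with placeholder flag values rather than the kernel's actual RESYNC_REQ bit definitions:

#include <stdio.h>
#include <stdint.h>

#define RESYNC_REQ        (1ULL << 0)   /* placeholder bit positions */
#define RESYNC_REQ_ASYNC  (1ULL << 1)

static uint64_t pack(uint32_t seq, uint16_t len, uint64_t flags)
{
    return ((uint64_t)seq << 32) | ((uint64_t)len << 16) | flags;
}

int main(void)
{
    uint64_t req = pack(0x11223344u, 80, RESYNC_REQ | RESYNC_REQ_ASYNC);

    unsigned seq   = (unsigned)(req >> 32);
    unsigned len   = (unsigned)((req >> 16) & 0xffff);
    unsigned async = (req & RESYNC_REQ_ASYNC) ? 1 : 0;

    printf("seq=0x%08x len=%u async=%u\n", seq, len, async);

    req = 0;            /* the new _cancel() helper: a single store */
    printf("cancelled req=%llu\n", (unsigned long long)req);
    return 0;
}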
@@ -218,6 +218,9 @@ TRACE_EVENT(tcp_rcvbuf_grow,
         __field(__u32, space)
         __field(__u32, ooo_space)
         __field(__u32, rcvbuf)
+        __field(__u32, rcv_ssthresh)
+        __field(__u32, window_clamp)
+        __field(__u32, rcv_wnd)
         __field(__u8, scaling_ratio)
         __field(__u16, sport)
         __field(__u16, dport)
@@ -245,6 +248,9 @@ TRACE_EVENT(tcp_rcvbuf_grow,
               tp->rcv_nxt;

         __entry->rcvbuf = sk->sk_rcvbuf;
+        __entry->rcv_ssthresh = tp->rcv_ssthresh;
+        __entry->window_clamp = tp->window_clamp;
+        __entry->rcv_wnd = tp->rcv_wnd;
         __entry->scaling_ratio = tp->scaling_ratio;
         __entry->sport = ntohs(inet->inet_sport);
         __entry->dport = ntohs(inet->inet_dport);
@@ -264,11 +270,14 @@ TRACE_EVENT(tcp_rcvbuf_grow,
     ),

     TP_printk("time=%u rtt_us=%u copied=%u inq=%u space=%u ooo=%u scaling_ratio=%u rcvbuf=%u "
+              "rcv_ssthresh=%u window_clamp=%u rcv_wnd=%u "
               "family=%s sport=%hu dport=%hu saddr=%pI4 daddr=%pI4 "
               "saddrv6=%pI6c daddrv6=%pI6c skaddr=%p sock_cookie=%llx",
               __entry->time, __entry->rtt_us, __entry->copied,
               __entry->inq, __entry->space, __entry->ooo_space,
               __entry->scaling_ratio, __entry->rcvbuf,
+              __entry->rcv_ssthresh, __entry->window_clamp,
+              __entry->rcv_wnd,
               show_family_name(__entry->family),
               __entry->sport, __entry->dport,
               __entry->saddr, __entry->daddr,
@@ -763,11 +763,16 @@ int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb)
     bat_priv = netdev_priv(mesh_iface);

     primary_if = batadv_primary_if_get_selected(bat_priv);
-    if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
+    if (!primary_if) {
         ret = -ENOENT;
         goto out_put_mesh_iface;
     }

+    if (primary_if->if_status != BATADV_IF_ACTIVE) {
+        ret = -ENOENT;
+        goto out_put_primary_if;
+    }
+
     hard_iface = batadv_netlink_get_hardif(bat_priv, cb);
     if (IS_ERR(hard_iface) && PTR_ERR(hard_iface) != -ENONET) {
         ret = PTR_ERR(hard_iface);
@@ -1327,11 +1332,16 @@ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)
     bat_priv = netdev_priv(mesh_iface);

     primary_if = batadv_primary_if_get_selected(bat_priv);
-    if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
+    if (!primary_if) {
         ret = -ENOENT;
         goto out_put_mesh_iface;
     }

+    if (primary_if->if_status != BATADV_IF_ACTIVE) {
+        ret = -ENOENT;
+        goto out_put_primary_if;
+    }
+
     hard_iface = batadv_netlink_get_hardif(bat_priv, cb);
     if (IS_ERR(hard_iface) && PTR_ERR(hard_iface) != -ENONET) {
         ret = PTR_ERR(hard_iface);
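Both batman-adv dump functions used to leave through out_put_mesh_iface whether the primary interface was missing or merely inactive, leaking the reference that batadv_primary_if_get_selected() took in the second case. Splitting the test gives each failure its own unwind label. The pattern in miniature, with refcounts simulated by a counter:

#include <stdio.h>

static int refs;

static int get_primary(int exists) { if (exists) refs++; return exists; }
static void put_primary(void)      { refs--; }

static int dump(int exists, int active)
{
    int ret = 0;

    if (!get_primary(exists))
        return -2;              /* nothing taken, nothing to release */

    if (!active) {
        ret = -2;
        goto out_put_primary;   /* taken but unusable: must release */
    }

    /* ... real dump work would go here ... */

out_put_primary:
    put_primary();
    return ret;
}

int main(void)
{
    dump(1, 0);                         /* primary exists but inactive */
    printf("leaked refs: %d\n", refs);  /* 0 with the split checks */
    return 0;
}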
@@ -843,6 +843,13 @@ static void bis_cleanup(struct hci_conn *conn)
         if (bis)
             return;

+        bis = hci_conn_hash_lookup_big_state(hdev,
+                                             conn->iso_qos.bcast.big,
+                                             BT_OPEN,
+                                             HCI_ROLE_MASTER);
+        if (bis)
+            return;
+
         hci_le_terminate_big(hdev, conn);
     } else {
         hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
@@ -1607,8 +1607,10 @@ static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,

         hci_dev_set_flag(hdev, HCI_LE_ADV);

-        if (adv && !adv->periodic)
+        if (adv)
             adv->enabled = true;
+        else if (!set->handle)
+            hci_dev_set_flag(hdev, HCI_LE_ADV_0);

         conn = hci_lookup_le_connect(hdev);
         if (conn)
@@ -1619,6 +1621,8 @@ static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
     if (cp->num_of_sets) {
         if (adv)
             adv->enabled = false;
+        else if (!set->handle)
+            hci_dev_clear_flag(hdev, HCI_LE_ADV_0);

         /* If just one instance was disabled check if there are
          * any other instance enabled before clearing HCI_LE_ADV
@@ -3959,8 +3963,11 @@ static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
         hci_dev_set_flag(hdev, HCI_LE_PER_ADV);

         if (adv)
-            adv->enabled = true;
+            adv->periodic_enabled = true;
     } else {
+        if (adv)
+            adv->periodic_enabled = false;
+
         /* If just one instance was disabled check if there are
          * any other instance enabled before clearing HCI_LE_PER_ADV.
          * The current periodic adv instance will be marked as
@@ -863,11 +863,17 @@ bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
 {
     struct hci_cmd_sync_work_entry *entry;

-    entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
-    if (!entry)
-        return false;
+    mutex_lock(&hdev->cmd_sync_work_lock);

-    hci_cmd_sync_cancel_entry(hdev, entry);
+    entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
+    if (!entry) {
+        mutex_unlock(&hdev->cmd_sync_work_lock);
+        return false;
+    }
+
+    _hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
+
+    mutex_unlock(&hdev->cmd_sync_work_lock);

     return true;
 }
@@ -1601,7 +1607,7 @@ int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)

     /* If periodic advertising already disabled there is nothing to do. */
     adv = hci_find_adv_instance(hdev, instance);
-    if (!adv || !adv->periodic || !adv->enabled)
+    if (!adv || !adv->periodic_enabled)
         return 0;

     memset(&cp, 0, sizeof(cp));
@@ -1666,7 +1672,7 @@ static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)

     /* If periodic advertising already enabled there is nothing to do. */
     adv = hci_find_adv_instance(hdev, instance);
-    if (adv && adv->periodic && adv->enabled)
+    if (adv && adv->periodic_enabled)
         return 0;

     memset(&cp, 0, sizeof(cp));
@@ -2600,9 +2606,8 @@ static int hci_resume_advertising_sync(struct hci_dev *hdev)
         /* If current advertising instance is set to instance 0x00
          * then we need to re-enable it.
          */
-        if (!hdev->cur_adv_instance)
-            err = hci_enable_ext_advertising_sync(hdev,
-                                                  hdev->cur_adv_instance);
+        if (hci_dev_test_and_clear_flag(hdev, HCI_LE_ADV_0))
+            err = hci_enable_ext_advertising_sync(hdev, 0x00);
     } else {
         /* Schedule for most recent instance to be restarted and begin
          * the software rotation loop
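The old dequeue_once looked the entry up, returned to open code, and only then cancelled it, leaving a window in which another context could free the entry; the fix holds cmd_sync_work_lock across both steps. The same check-then-act rule, demonstrated with a pthread mutex:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int entry_present = 1;

/* Look up and consume the entry in one critical section, so no other
 * thread can free it between the check and the use. */
static int dequeue_once(void)
{
    int found;

    pthread_mutex_lock(&lock);
    found = entry_present;
    if (found)
        entry_present = 0;  /* "cancel" while still holding the lock */
    pthread_mutex_unlock(&lock);

    return found;
}

int main(void)
{
    int first = dequeue_once();
    int second = dequeue_once();

    printf("first: %d, second: %d\n", first, second);
    return 0;
}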
@@ -2032,7 +2032,7 @@ static void iso_conn_ready(struct iso_conn *conn)
          */
         if (!bacmp(&hcon->dst, BDADDR_ANY)) {
             bacpy(&hcon->dst, &iso_pi(parent)->dst);
-            hcon->dst_type = iso_pi(parent)->dst_type;
+            hcon->dst_type = le_addr_type(iso_pi(parent)->dst_type);
         }

         if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags)) {
@@ -2046,7 +2046,13 @@ static void iso_conn_ready(struct iso_conn *conn)
         }

         bacpy(&iso_pi(sk)->dst, &hcon->dst);
-        iso_pi(sk)->dst_type = hcon->dst_type;
+
+        /* Convert from HCI to three-value type */
+        if (hcon->dst_type == ADDR_LE_DEV_PUBLIC)
+            iso_pi(sk)->dst_type = BDADDR_LE_PUBLIC;
+        else
+            iso_pi(sk)->dst_type = BDADDR_LE_RANDOM;
+
         iso_pi(sk)->sync_handle = iso_pi(parent)->sync_handle;
         memcpy(iso_pi(sk)->base, iso_pi(parent)->base, iso_pi(parent)->base_len);
         iso_pi(sk)->base_len = iso_pi(parent)->base_len;
@@ -282,7 +282,7 @@ static void __set_retrans_timer(struct l2cap_chan *chan)
     if (!delayed_work_pending(&chan->monitor_timer) &&
         chan->retrans_timeout) {
         l2cap_set_timer(chan, &chan->retrans_timer,
-                        secs_to_jiffies(chan->retrans_timeout));
+                        msecs_to_jiffies(chan->retrans_timeout));
     }
 }
@@ -291,7 +291,7 @@ static void __set_monitor_timer(struct l2cap_chan *chan)
     __clear_retrans_timer(chan);
     if (chan->monitor_timeout) {
         l2cap_set_timer(chan, &chan->monitor_timer,
-                        secs_to_jiffies(chan->monitor_timeout));
+                        msecs_to_jiffies(chan->monitor_timeout));
     }
 }
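These two conversions pair with the l2cap.h hunk earlier in the diff: L2CAP_DEFAULT_RETRANS_TO and L2CAP_DEFAULT_MONITOR_TO are now millisecond values (2000 and 12000), so arming the ERTM timers must convert with msecs_to_jiffies(). What the unit mismatch would have cost, assuming HZ=1000 purely for illustration:

#include <stdio.h>

#define HZ 1000 /* assumed tick rate, for illustration only */

static unsigned long msecs_to_jiffies(unsigned long ms) { return ms * HZ / 1000; }
static unsigned long secs_to_jiffies(unsigned long s)   { return s * HZ; }

int main(void)
{
    unsigned long retrans_to = 2000; /* now stored in milliseconds */

    printf("correct: %lu jiffies\n", msecs_to_jiffies(retrans_to));
    printf("if still treated as seconds: %lu jiffies\n",
           secs_to_jiffies(retrans_to)); /* 1000x too long a timer */
    return 0;
}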
@@ -2175,19 +2175,24 @@ static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
     sk = cmd->sk;

     if (status) {
-        mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
-                        status);
-        return;
+        mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true,
+                             cmd_status_rsp, &status);
+        goto done;
     }

-    mgmt_pending_remove(cmd);
     mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
+
+done:
+    mgmt_pending_free(cmd);
 }

 static int set_mesh_sync(struct hci_dev *hdev, void *data)
 {
     struct mgmt_pending_cmd *cmd = data;
-    struct mgmt_cp_set_mesh cp;
+    DEFINE_FLEX(struct mgmt_cp_set_mesh, cp, ad_types, num_ad_types,
+                sizeof(hdev->mesh_ad_types));
     size_t len;

     mutex_lock(&hdev->mgmt_pending_lock);
@@ -2197,27 +2202,26 @@ static int set_mesh_sync(struct hci_dev *hdev, void *data)
         return -ECANCELED;
     }

-    memcpy(&cp, cmd->param, sizeof(cp));
+    len = cmd->param_len;
+    memcpy(cp, cmd->param, min(__struct_size(cp), len));

     mutex_unlock(&hdev->mgmt_pending_lock);

-    len = cmd->param_len;
-
     memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));

-    if (cp.enable)
+    if (cp->enable)
         hci_dev_set_flag(hdev, HCI_MESH);
     else
         hci_dev_clear_flag(hdev, HCI_MESH);

-    hdev->le_scan_interval = __le16_to_cpu(cp.period);
-    hdev->le_scan_window = __le16_to_cpu(cp.window);
+    hdev->le_scan_interval = __le16_to_cpu(cp->period);
+    hdev->le_scan_window = __le16_to_cpu(cp->window);

-    len -= sizeof(cp);
+    len -= sizeof(struct mgmt_cp_set_mesh);

     /* If filters don't fit, forward all adv pkts */
     if (len <= sizeof(hdev->mesh_ad_types))
-        memcpy(hdev->mesh_ad_types, cp.ad_types, len);
+        memcpy(hdev->mesh_ad_types, cp->ad_types, len);

     hci_update_passive_scan_sync(hdev);
     return 0;
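set_mesh_sync() used to memcpy the parameter into a fixed-size struct mgmt_cp_set_mesh on the stack even though the struct ends in a flexible ad_types[] array; DEFINE_FLEX() now reserves the header plus the largest filter list up front, and the copy is bounded by both the reserved size and the actual parameter length. The bounded-copy idea, standalone, with the struct layout simplified:

#include <stdio.h>
#include <string.h>

struct set_mesh {
    unsigned short window, period;
    unsigned char num_ad_types;
    unsigned char ad_types[];   /* flexible tail */
};

#define MAX_AD_TYPES 16

int main(void)
{
    /* Reserve header + tail in one block, like DEFINE_FLEX() does. */
    _Alignas(struct set_mesh)
    unsigned char storage[sizeof(struct set_mesh) + MAX_AD_TYPES] = {0};
    struct set_mesh *cp = (struct set_mesh *)storage;

    unsigned char param[64] = {0}; /* pretend this came from userspace */
    size_t param_len = 9;

    /* Copy no more than we reserved and no more than we were given. */
    size_t n = sizeof(storage) < param_len ? sizeof(storage) : param_len;
    memcpy(cp, param, n);

    printf("copied %zu bytes, %u ad types\n", n, cp->num_ad_types);
    return 0;
}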
@@ -643,8 +643,8 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
         tty_port_tty_hangup(&dev->port, true);

     dev->modem_status =
-        ((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) |
-        ((v24_sig & RFCOMM_V24_RTR) ? (TIOCM_RTS | TIOCM_CTS) : 0) |
+        ((v24_sig & RFCOMM_V24_RTC) ? TIOCM_DSR : 0) |
+        ((v24_sig & RFCOMM_V24_RTR) ? TIOCM_CTS : 0) |
         ((v24_sig & RFCOMM_V24_IC) ? TIOCM_RI : 0) |
         ((v24_sig & RFCOMM_V24_DV) ? TIOCM_CD : 0);
 }
@@ -1055,10 +1055,14 @@ static void rfcomm_tty_hangup(struct tty_struct *tty)
 static int rfcomm_tty_tiocmget(struct tty_struct *tty)
 {
     struct rfcomm_dev *dev = tty->driver_data;
+    struct rfcomm_dlc *dlc = dev->dlc;
+    u8 v24_sig;

     BT_DBG("tty %p dev %p", tty, dev);

-    return dev->modem_status;
+    rfcomm_dlc_get_modem_status(dlc, &v24_sig);
+
+    return (v24_sig & (TIOCM_DTR | TIOCM_RTS)) | dev->modem_status;
 }

 static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
@@ -1071,23 +1075,15 @@ static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)

     rfcomm_dlc_get_modem_status(dlc, &v24_sig);

-    if (set & TIOCM_DSR || set & TIOCM_DTR)
+    if (set & TIOCM_DTR)
         v24_sig |= RFCOMM_V24_RTC;
-    if (set & TIOCM_RTS || set & TIOCM_CTS)
+    if (set & TIOCM_RTS)
         v24_sig |= RFCOMM_V24_RTR;
-    if (set & TIOCM_RI)
-        v24_sig |= RFCOMM_V24_IC;
-    if (set & TIOCM_CD)
-        v24_sig |= RFCOMM_V24_DV;

-    if (clear & TIOCM_DSR || clear & TIOCM_DTR)
+    if (clear & TIOCM_DTR)
         v24_sig &= ~RFCOMM_V24_RTC;
-    if (clear & TIOCM_RTS || clear & TIOCM_CTS)
+    if (clear & TIOCM_RTS)
         v24_sig &= ~RFCOMM_V24_RTR;
-    if (clear & TIOCM_RI)
-        v24_sig &= ~RFCOMM_V24_IC;
-    if (clear & TIOCM_CD)
-        v24_sig &= ~RFCOMM_V24_DV;

     rfcomm_dlc_set_modem_status(dlc, v24_sig);
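The RFCOMM change stops folding each remote V.24 signal into both a local and a remote TIOCM bit: the peer's RTC now reads back as DSR only and RTR as CTS only, while tiocmget() adds the locally asserted DTR/RTS separately. The corrected mapping, using placeholder bit values rather than the real uapi constants:

#include <stdio.h>

#define V24_RTC 0x04    /* placeholder signal bits */
#define V24_RTR 0x08

#define TIOCM_DTR 0x001 /* placeholder TIOCM bits */
#define TIOCM_RTS 0x002
#define TIOCM_CTS 0x004
#define TIOCM_DSR 0x010

/* Remote status: the peer's RTC/RTR map to what *we* see: DSR/CTS. */
static int modem_status(int v24)
{
    return ((v24 & V24_RTC) ? TIOCM_DSR : 0) |
           ((v24 & V24_RTR) ? TIOCM_CTS : 0);
}

int main(void)
{
    int remote = modem_status(V24_RTC | V24_RTR);
    int local  = TIOCM_DTR | TIOCM_RTS; /* what we asserted ourselves */

    /* tiocmget() now reports both halves without double-counting. */
    printf("tiocmget -> 0x%03x\n", remote | local);
    return 0;
}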
@@ -17,6 +17,7 @@
 #include <net/page_pool/helpers.h>
 #include <net/page_pool/memory_provider.h>
 #include <net/sock.h>
+#include <net/tcp.h>
 #include <trace/events/page_pool.h>

 #include "devmem.h"
@@ -357,7 +358,8 @@ struct net_devmem_dmabuf_binding *net_devmem_get_binding(struct sock *sk,
                                                          unsigned int dmabuf_id)
 {
     struct net_devmem_dmabuf_binding *binding;
-    struct dst_entry *dst = __sk_dst_get(sk);
+    struct net_device *dst_dev;
+    struct dst_entry *dst;
     int err = 0;

     binding = net_devmem_lookup_dmabuf(dmabuf_id);
@@ -366,16 +368,35 @@ struct net_devmem_dmabuf_binding *net_devmem_get_binding(struct sock *sk,
         goto out_err;
     }

+    rcu_read_lock();
+    dst = __sk_dst_get(sk);
+    /* If dst is NULL (route expired), attempt to rebuild it. */
+    if (unlikely(!dst)) {
+        if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) {
+            err = -EHOSTUNREACH;
+            goto out_unlock;
+        }
+        dst = __sk_dst_get(sk);
+        if (unlikely(!dst)) {
+            err = -ENODEV;
+            goto out_unlock;
+        }
+    }
+
     /* The dma-addrs in this binding are only reachable to the corresponding
      * net_device.
      */
-    if (!dst || !dst->dev || dst->dev->ifindex != binding->dev->ifindex) {
+    dst_dev = dst_dev_rcu(dst);
+    if (unlikely(!dst_dev) || unlikely(dst_dev != binding->dev)) {
         err = -ENODEV;
-        goto out_err;
+        goto out_unlock;
     }

+    rcu_read_unlock();
     return binding;

+out_unlock:
+    rcu_read_unlock();
 out_err:
     if (binding)
         net_devmem_dmabuf_binding_put(binding);
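net_devmem_get_binding() now takes rcu_read_lock(), re-reads the socket's cached dst, rebuilds it once via rebuild_header() if the route expired, and only then insists the route's device matches the binding's. Stripped of the kernel APIs, the control flow is "retry once after a rebuild, then validate"; everything below is a stub model of that flow, not kernel code:

#include <stdio.h>

static int cached_route; /* 0 = expired/absent */

static int get_route(void) { return cached_route; }
static int rebuild(void)   { cached_route = 42; return 0; /* 0 = ok */ }

static int get_binding(int bound_dev)
{
    int route = get_route();

    if (!route) {           /* route expired: try to rebuild it */
        if (rebuild())
            return -1;      /* -EHOSTUNREACH in the kernel */
        route = get_route();
        if (!route)
            return -2;      /* -ENODEV */
    }

    if (route != bound_dev) /* dma-addrs only valid on that device */
        return -2;

    return 0;
}

int main(void)
{
    printf("binding check: %d\n", get_binding(42));
    return 0;
}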
@@ -891,18 +891,27 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
     }
 }

-void tcp_rcvbuf_grow(struct sock *sk)
+void tcp_rcvbuf_grow(struct sock *sk, u32 newval)
 {
     const struct net *net = sock_net(sk);
     struct tcp_sock *tp = tcp_sk(sk);
-    int rcvwin, rcvbuf, cap;
+    u32 rcvwin, rcvbuf, cap, oldval;
+    u64 grow;
+
+    oldval = tp->rcvq_space.space;
+    tp->rcvq_space.space = newval;

     if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) ||
         (sk->sk_userlocks & SOCK_RCVBUF_LOCK))
         return;

+    /* DRS is always one RTT late. */
+    rcvwin = newval << 1;
+
     /* slow start: allow the sender to double its rate. */
-    rcvwin = tp->rcvq_space.space << 1;
+    grow = (u64)rcvwin * (newval - oldval);
+    do_div(grow, oldval);
+    rcvwin += grow << 1;

     if (!RB_EMPTY_ROOT(&tp->out_of_order_queue))
         rcvwin += TCP_SKB_CB(tp->ooo_last_skb)->end_seq - tp->rcv_nxt;
@@ -943,9 +952,7 @@ void tcp_rcv_space_adjust(struct sock *sk)

     trace_tcp_rcvbuf_grow(sk, time);

-    tp->rcvq_space.space = copied;
-
-    tcp_rcvbuf_grow(sk);
+    tcp_rcvbuf_grow(sk, copied);

 new_measure:
     tp->rcvq_space.seq = tp->copied_seq;
@@ -5270,7 +5277,7 @@ end:
     }
     /* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */
     if (sk->sk_socket)
-        tcp_rcvbuf_grow(sk);
+        tcp_rcvbuf_grow(sk, tp->rcvq_space.space);
 }

 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
@@ -1876,6 +1876,9 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
     link_conf->nontransmitted = false;
     link_conf->ema_ap = false;
     link_conf->bssid_indicator = 0;
+    link_conf->fils_discovery.min_interval = 0;
+    link_conf->fils_discovery.max_interval = 0;
+    link_conf->unsol_bcast_probe_resp_interval = 0;

     __sta_info_flush(sdata, true, link_id, NULL);
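tcp_rcvbuf_grow() now receives the fresh measurement and swaps it into rcvq_space itself; the window target becomes twice the new measure (DRS runs one RTT behind) plus twice the proportional growth, rcvwin * (newval - oldval) / oldval. mptcp_rcvbuf_grow() further down in this diff mirrors the same arithmetic. Worked numbers for the formula:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t oldval = 100000, newval = 150000; /* bytes per RTT */

    uint32_t rcvwin = newval << 1;             /* DRS is one RTT late */
    uint64_t grow = (uint64_t)rcvwin * (newval - oldval) / oldval;
    rcvwin += (uint32_t)(grow << 1);           /* let sender double rate */

    printf("rcvwin = %u\n", (unsigned)rcvwin); /* 300000 + 300000 = 600000 */
    return 0;
}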
@@ -508,11 +508,16 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
             ret = ieee80211_key_enable_hw_accel(new);
         }
     } else {
-        if (!new->local->wowlan)
+        if (!new->local->wowlan) {
             ret = ieee80211_key_enable_hw_accel(new);
-        else if (link_id < 0 || !sdata->vif.active_links ||
-                 BIT(link_id) & sdata->vif.active_links)
+        } else if (link_id < 0 || !sdata->vif.active_links ||
+                   BIT(link_id) & sdata->vif.active_links) {
             new->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
+            if (!(new->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
+                                     IEEE80211_KEY_FLAG_PUT_MIC_SPACE |
+                                     IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
+                decrease_tailroom_need_count(sdata, 1);
+        }
     }

     if (ret)
@@ -85,6 +85,7 @@ static const struct snmp_mib mptcp_snmp_list[] = {
     SNMP_MIB_ITEM("DssFallback", MPTCP_MIB_DSSFALLBACK),
     SNMP_MIB_ITEM("SimultConnectFallback", MPTCP_MIB_SIMULTCONNFALLBACK),
     SNMP_MIB_ITEM("FallbackFailed", MPTCP_MIB_FALLBACKFAILED),
+    SNMP_MIB_ITEM("WinProbe", MPTCP_MIB_WINPROBE),
 };

 /* mptcp_mib_alloc - allocate percpu mib counters
@@ -88,6 +88,7 @@ enum linux_mptcp_mib_field {
     MPTCP_MIB_DSSFALLBACK,        /* Bad or missing DSS */
     MPTCP_MIB_SIMULTCONNFALLBACK, /* Simultaneous connect */
     MPTCP_MIB_FALLBACKFAILED,     /* Can't fallback due to msk status */
+    MPTCP_MIB_WINPROBE,           /* MPTCP-level zero window probe */
     __MPTCP_MIB_MAX
 };
@@ -194,17 +194,26 @@ static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
  * - mptcp does not maintain a msk-level window clamp
  * - returns true when the receive buffer is actually updated
  */
-static bool mptcp_rcvbuf_grow(struct sock *sk)
+static bool mptcp_rcvbuf_grow(struct sock *sk, u32 newval)
 {
     struct mptcp_sock *msk = mptcp_sk(sk);
     const struct net *net = sock_net(sk);
-    int rcvwin, rcvbuf, cap;
+    u32 rcvwin, rcvbuf, cap, oldval;
+    u64 grow;

+    oldval = msk->rcvq_space.space;
+    msk->rcvq_space.space = newval;
     if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) ||
         (sk->sk_userlocks & SOCK_RCVBUF_LOCK))
         return false;

-    rcvwin = msk->rcvq_space.space << 1;
+    /* DRS is always one RTT late. */
+    rcvwin = newval << 1;
+
+    /* slow start: allow the sender to double its rate. */
+    grow = (u64)rcvwin * (newval - oldval);
+    do_div(grow, oldval);
+    rcvwin += grow << 1;

     if (!RB_EMPTY_ROOT(&msk->out_of_order_queue))
         rcvwin += MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq - msk->ack_seq;
@@ -334,7 +343,7 @@ end:
     skb_set_owner_r(skb, sk);
     /* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */
     if (sk->sk_socket)
-        mptcp_rcvbuf_grow(sk);
+        mptcp_rcvbuf_grow(sk, msk->rcvq_space.space);
 }

 static void mptcp_init_skb(struct sock *ssk, struct sk_buff *skb, int offset,
@@ -998,7 +1007,7 @@ static void __mptcp_clean_una(struct sock *sk)
             if (WARN_ON_ONCE(!msk->recovery))
                 break;

-            WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
+            msk->first_pending = mptcp_send_next(sk);
         }

         dfrag_clear(sk, dfrag);
@@ -1290,7 +1299,12 @@ alloc_skb:
     if (copy == 0) {
         u64 snd_una = READ_ONCE(msk->snd_una);

-        if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) {
+        /* No need for zero probe if there are any data pending
+         * either at the msk or ssk level; skb is the current write
+         * queue tail and can be empty at this point.
+         */
+        if (snd_una != msk->snd_nxt || skb->len ||
+            skb != tcp_send_head(ssk)) {
             tcp_remove_empty_skb(ssk);
             return 0;
         }
@@ -1341,6 +1355,7 @@ alloc_skb:
              mpext->dsn64);

     if (zero_window_probe) {
+        MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_WINPROBE);
         mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
         mpext->frozen = 1;
         if (READ_ONCE(msk->csum_enabled))
@@ -1543,7 +1558,7 @@ static int __subflow_push_pending(struct sock *sk, struct sock *ssk,

         mptcp_update_post_push(msk, dfrag, ret);
     }
-    WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
+    msk->first_pending = mptcp_send_next(sk);

     if (msk->snd_burst <= 0 ||
         !sk_stream_memory_free(ssk) ||
@@ -1903,7 +1918,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
         get_page(dfrag->page);
         list_add_tail(&dfrag->list, &msk->rtx_queue);
         if (!msk->first_pending)
-            WRITE_ONCE(msk->first_pending, dfrag);
+            msk->first_pending = dfrag;
     }
     pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk,
              dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
@@ -1936,22 +1951,36 @@ do_error:

 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);

-static int __mptcp_recvmsg_mskq(struct sock *sk,
-                                struct msghdr *msg,
-                                size_t len, int flags,
+static int __mptcp_recvmsg_mskq(struct sock *sk, struct msghdr *msg,
+                                size_t len, int flags, int copied_total,
                                 struct scm_timestamping_internal *tss,
                                 int *cmsg_flags)
 {
     struct mptcp_sock *msk = mptcp_sk(sk);
     struct sk_buff *skb, *tmp;
+    int total_data_len = 0;
     int copied = 0;

     skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
-        u32 offset = MPTCP_SKB_CB(skb)->offset;
+        u32 delta, offset = MPTCP_SKB_CB(skb)->offset;
         u32 data_len = skb->len - offset;
-        u32 count = min_t(size_t, len - copied, data_len);
+        u32 count;
         int err;

+        if (flags & MSG_PEEK) {
+            /* skip already peeked skbs */
+            if (total_data_len + data_len <= copied_total) {
+                total_data_len += data_len;
+                continue;
+            }
+
+            /* skip the already peeked data in the current skb */
+            delta = copied_total - total_data_len;
+            offset += delta;
+            data_len -= delta;
+        }
+
+        count = min_t(size_t, len - copied, data_len);
         if (!(flags & MSG_TRUNC)) {
             err = skb_copy_datagram_msg(skb, offset, msg, count);
             if (unlikely(err < 0)) {
@@ -1968,16 +1997,14 @@ static int __mptcp_recvmsg_mskq(struct sock *sk,

         copied += count;

-        if (count < data_len) {
-            if (!(flags & MSG_PEEK)) {
+        if (!(flags & MSG_PEEK)) {
+            msk->bytes_consumed += count;
+            if (count < data_len) {
                 MPTCP_SKB_CB(skb)->offset += count;
                 MPTCP_SKB_CB(skb)->map_seq += count;
-                msk->bytes_consumed += count;
+                break;
             }
-            break;
-        }

-        if (!(flags & MSG_PEEK)) {
             /* avoid the indirect call, we know the destructor is sock_rfree */
             skb->destructor = NULL;
             skb->sk = NULL;
@@ -1985,7 +2012,6 @@ static int __mptcp_recvmsg_mskq(struct sock *sk,
             sk_mem_uncharge(sk, skb->truesize);
             __skb_unlink(skb, &sk->sk_receive_queue);
             skb_attempt_defer_free(skb);
-            msk->bytes_consumed += count;
         }

         if (copied >= len)
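With MSG_PEEK the receive queue is left intact, so a second peek must skip everything already reported: whole buffers first, then an offset into the first partially peeked one, which is exactly what the new copied_total/total_data_len bookkeeping does. The arithmetic on plain arrays:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    size_t seg_len[] = { 100, 200, 150 }; /* queued segment sizes */
    size_t copied_total = 250;            /* already peeked earlier */
    size_t total = 0;

    for (size_t i = 0; i < 3; i++) {
        size_t offset = 0, data_len = seg_len[i];

        if (total + data_len <= copied_total) { /* fully peeked: skip */
            total += data_len;
            continue;
        }
        offset = copied_total - total;          /* partially peeked */
        data_len -= offset;
        printf("resume in segment %zu at offset %zu (%zu left)\n",
               i, offset, data_len);
        break;
    }
    return 0;
}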
@@ -2049,9 +2075,7 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
     if (msk->rcvq_space.copied <= msk->rcvq_space.space)
         goto new_measure;

-    msk->rcvq_space.space = msk->rcvq_space.copied;
-    if (mptcp_rcvbuf_grow(sk)) {
-
+    if (mptcp_rcvbuf_grow(sk, msk->rcvq_space.copied)) {
         /* Make subflows follow along. If we do not do this, we
          * get drops at subflow level if skbs can't be moved to
          * the mptcp rx queue fast enough (announced rcv_win can
@@ -2063,8 +2087,9 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)

         ssk = mptcp_subflow_tcp_sock(subflow);
         slow = lock_sock_fast(ssk);
-        tcp_sk(ssk)->rcvq_space.space = msk->rcvq_space.copied;
-        tcp_rcvbuf_grow(ssk);
+        /* subflows can be added before tcp_init_transfer() */
+        if (tcp_sk(ssk)->rcvq_space.space)
+            tcp_rcvbuf_grow(ssk, msk->rcvq_space.copied);
         unlock_sock_fast(ssk, slow);
     }
 }
@@ -2183,7 +2208,8 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
     while (copied < len) {
         int err, bytes_read;

-        bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags, &tss, &cmsg_flags);
+        bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags,
+                                          copied, &tss, &cmsg_flags);
         if (unlikely(bytes_read < 0)) {
             if (!copied)
                 copied = bytes_read;
@@ -2874,7 +2900,7 @@ static void __mptcp_clear_xmit(struct sock *sk)
     struct mptcp_sock *msk = mptcp_sk(sk);
     struct mptcp_data_frag *dtmp, *dfrag;

-    WRITE_ONCE(msk->first_pending, NULL);
+    msk->first_pending = NULL;
     list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
         dfrag_clear(sk, dfrag);
 }
@@ -3414,9 +3440,6 @@ void __mptcp_data_acked(struct sock *sk)

 void __mptcp_check_push(struct sock *sk, struct sock *ssk)
 {
-    if (!mptcp_send_head(sk))
-        return;
-
     if (!sock_owned_by_user(sk))
         __mptcp_subflow_push_pending(sk, ssk, false);
     else
@@ -414,7 +414,7 @@ static inline struct mptcp_data_frag *mptcp_send_head(const struct sock *sk)
 {
     const struct mptcp_sock *msk = mptcp_sk(sk);

-    return READ_ONCE(msk->first_pending);
+    return msk->first_pending;
 }

 static inline struct mptcp_data_frag *mptcp_send_next(struct sock *sk)
@@ -48,7 +48,7 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
         return;
     }

-    count = priv->list->count;
+    count = READ_ONCE(priv->list->count);

     if ((count > priv->limit) ^ priv->invert) {
         regs->verdict.code = NFT_BREAK;
@@ -22,6 +22,7 @@
 #include <net/netfilter/nf_conntrack_timeout.h>
 #include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>

 struct nft_ct_helper_obj {
     struct nf_conntrack_helper *helper4;
@@ -379,6 +380,14 @@ static bool nft_ct_tmpl_alloc_pcpu(void)
 }
 #endif

+static void __nft_ct_get_destroy(const struct nft_ctx *ctx, struct nft_ct *priv)
+{
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+    if (priv->key == NFT_CT_LABELS)
+        nf_connlabels_put(ctx->net);
+#endif
+}
+
 static int nft_ct_get_init(const struct nft_ctx *ctx,
                            const struct nft_expr *expr,
                            const struct nlattr * const tb[])
@@ -413,6 +422,10 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
         if (tb[NFTA_CT_DIRECTION] != NULL)
             return -EINVAL;
         len = NF_CT_LABELS_MAX_SIZE;
+
+        err = nf_connlabels_get(ctx->net, (len * BITS_PER_BYTE) - 1);
+        if (err)
+            return err;
         break;
 #endif
     case NFT_CT_HELPER:
@@ -494,7 +507,8 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
         case IP_CT_DIR_REPLY:
             break;
         default:
-            return -EINVAL;
+            err = -EINVAL;
+            goto err;
         }
     }
@@ -502,11 +516,11 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
     err = nft_parse_register_store(ctx, tb[NFTA_CT_DREG], &priv->dreg, NULL,
                                    NFT_DATA_VALUE, len);
     if (err < 0)
-        return err;
+        goto err;

     err = nf_ct_netns_get(ctx->net, ctx->family);
     if (err < 0)
-        return err;
+        goto err;

     if (priv->key == NFT_CT_BYTES ||
         priv->key == NFT_CT_PKTS ||
@@ -514,6 +528,9 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
         nf_ct_set_acct(ctx->net, true);

     return 0;
+err:
+    __nft_ct_get_destroy(ctx, priv);
+    return err;
 }

 static void __nft_ct_set_destroy(const struct nft_ctx *ctx, struct nft_ct *priv)
@@ -626,6 +643,9 @@ err1:
 static void nft_ct_get_destroy(const struct nft_ctx *ctx,
                                const struct nft_expr *expr)
 {
+    struct nft_ct *priv = nft_expr_priv(expr);
+
+    __nft_ct_get_destroy(ctx, priv);
     nf_ct_netns_put(ctx->net, ctx->family);
 }
@@ -1173,6 +1193,10 @@ static void nft_ct_helper_obj_eval(struct nft_object *obj,
     if (help) {
         rcu_assign_pointer(help->helper, to_assign);
         set_bit(IPS_HELPER_BIT, &ct->status);
+
+        if ((ct->status & IPS_NAT_MASK) && !nfct_seqadj(ct))
+            if (!nfct_seqadj_ext_add(ct))
+                regs->verdict.code = NF_DROP;
     }
 }
@@ -190,7 +190,7 @@ int sctp_rcv(struct sk_buff *skb)
         goto discard_release;
     nf_reset_ct(skb);

-    if (sk_filter(sk, skb))
+    if (sk_filter(sk, skb) || skb->len < sizeof(struct sctp_chunkhdr))
         goto discard_release;

     /* Create an SCTP packet structure. */
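The SCTP hunk refuses any packet whose remaining payload is shorter than one chunk header before the parser dereferences it. The same guard in a freestanding parser:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct chunkhdr { uint8_t type, flags; uint16_t length; };

static int parse(const uint8_t *buf, size_t len)
{
    struct chunkhdr ch;

    if (len < sizeof(ch))   /* too short: never touch the bytes */
        return -1;

    memcpy(&ch, buf, sizeof(ch));
    return ch.type;
}

int main(void)
{
    uint8_t runt[2] = { 1, 0 };

    printf("runt packet -> %d\n", parse(runt, sizeof(runt))); /* -1 */
    return 0;
}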
@@ -723,8 +723,10 @@ tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
     /* shouldn't get to wraparound:
      * too long in async stage, something bad happened
      */
-    if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
+    if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX)) {
+        tls_offload_rx_resync_async_request_cancel(resync_async);
         return false;
+    }

     /* asynchronous stage: log all headers seq such that
      * req_seq <= seq <= end_seq, and wait for real resync request
@@ -4136,8 +4136,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
         rdev->wiphy.txq_quantum = old_txq_quantum;
     }

-    if (old_rts_threshold)
-        kfree(old_radio_rts_threshold);
+    kfree(old_radio_rts_threshold);

     return result;
 }
@@ -313,7 +313,7 @@ ynl_attr_put_str(struct nlmsghdr *nlh, unsigned int attr_type, const char *str)
     struct nlattr *attr;
     size_t len;

-    len = strlen(str);
+    len = strlen(str) + 1;
     if (__ynl_attr_put_overflow(nlh, len))
         return;
@@ -321,7 +321,7 @@ ynl_attr_put_str(struct nlmsghdr *nlh, unsigned int attr_type, const char *str)
     attr->nla_type = attr_type;

     strcpy((char *)ynl_attr_data(attr), str);
-    attr->nla_len = NLA_HDRLEN + NLA_ALIGN(len);
+    attr->nla_len = NLA_HDRLEN + len;

     nlh->nlmsg_len += NLMSG_ALIGN(attr->nla_len);
 }
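A netlink string attribute carries its NUL terminator, and nla_len must describe the exact payload; only the message-level advance is rounded up to 4-byte alignment. Counting the terminator in len and dropping the NLA_ALIGN() from nla_len, as the two ynl hunks do:

#include <stdio.h>
#include <string.h>

#define NLA_HDRLEN 4
#define NLA_ALIGN(n) (((n) + 3) & ~3)

int main(void)
{
    const char *str = "eth0";
    size_t len = strlen(str) + 1;              /* payload includes NUL */

    unsigned short nla_len = NLA_HDRLEN + len; /* exact: 9 */
    size_t msg_advance = NLA_ALIGN(nla_len);   /* padded: 12 */

    printf("nla_len=%hu, message advances %zu bytes\n",
           nla_len, msg_advance);
    return 0;
}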
@@ -44,6 +44,9 @@ def print_field(reply, *desc):
     Pretty-print a set of fields from the reply. desc specifies the
     fields and the optional type (bool/yn).
     """
+    if not reply:
+        return
+
     if len(desc) == 0:
         return print_field(reply, *zip(reply.keys(), reply.keys()))
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # SPDX-License-Identifier: GPL-2.0

 # Test various bareudp tunnel configurations.