Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2026-01-11 17:10:13 +00:00)
Merge branch '200GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue
Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2026-01-06 (idpf)

This series contains updates to the idpf driver only.

Emil fixes issues related to resets; among them timeouts, NULL pointer
dereferences, and memory leaks.

Sreedevi resolves issues around RSS, mainly involving operations when the
interface is down and during resets. She also addresses some incomplete
cleanups for ntuple filters and interrupts.

Erik fixes incomplete output of ntuple filters.

Josh restricts the Rx buffer size to follow hardware restrictions.

Larysa adds a check to prevent a NULL pointer dereference when RDMA is not
enabled.

* '200GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
  idpf: fix aux device unplugging when rdma is not supported by vport
  idpf: cap maximum Rx buffer size
  idpf: Fix error handling in idpf_vport_open()
  idpf: Fix RSS LUT NULL ptr issue after soft reset
  idpf: Fix RSS LUT configuration on down interfaces
  idpf: Fix RSS LUT NULL pointer crash on early ethtool operations
  idpf: fix issue with ethtool -n command display
  idpf: fix memory leak of flow steer list on rmmod
  idpf: fix error handling in the init_task on load
  idpf: fix memory leak in idpf_vc_core_deinit()
  idpf: fix memory leak in idpf_vport_rel()
  idpf: detach and close netdevs while handling a reset
  idpf: keep the netdev when a reset fails
====================

Link: https://patch.msgid.link/20260107000648.1861994-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
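For reference, the user-visible paths touched by the flow-steering and RSS fixes above are the standard ethtool ntuple and RSS interfaces. A few illustrative commands (the interface name and rule parameters are placeholders, not taken from the series):

  ethtool -n eth0                  # list flow-steering (ntuple) rule locations
  ethtool -n eth0 rule 5           # read back a single rule (ETHTOOL_GRXCLSRULE)
  ethtool -N eth0 flow-type tcp4 dst-port 80 action 2 loc 5    # add a rule at location 5
  ethtool -N eth0 delete 5         # delete the rule again
  ethtool -x eth0                  # show the RSS key and indirection table
  ethtool -X eth0 equal 4          # spread the indirection table over 4 queues
  ethtool -K eth0 rxhash off       # with rxhash off, the LUT is reported/programmed as zeros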
This commit is contained in: commit 1f20c77496
@@ -284,8 +284,7 @@ struct idpf_port_stats {
 
 struct idpf_fsteer_fltr {
     struct list_head list;
-    u32 loc;
-    u32 q_index;
+    struct ethtool_rx_flow_spec fs;
 };
 
 /**
@@ -424,14 +423,12 @@ enum idpf_user_flags {
  * @rss_key: RSS hash key
  * @rss_lut_size: Size of RSS lookup table
  * @rss_lut: RSS lookup table
- * @cached_lut: Used to restore previously init RSS lut
  */
 struct idpf_rss_data {
     u16 rss_key_size;
     u8 *rss_key;
     u16 rss_lut_size;
     u32 *rss_lut;
-    u32 *cached_lut;
 };
 
 /**
@@ -558,6 +555,7 @@ struct idpf_vector_lifo {
  * @max_q: Maximum possible queues
  * @req_qs_chunks: Queue chunk data for requested queues
  * @mac_filter_list_lock: Lock to protect mac filters
+ * @flow_steer_list_lock: Lock to protect fsteer filters
  * @flags: See enum idpf_vport_config_flags
  */
 struct idpf_vport_config {
@@ -565,6 +563,7 @@ struct idpf_vport_config {
     struct idpf_vport_max_q max_q;
     struct virtchnl2_add_queues *req_qs_chunks;
     spinlock_t mac_filter_list_lock;
+    spinlock_t flow_steer_list_lock;
     DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
 };
 
@@ -37,6 +37,7 @@ static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
 {
     struct idpf_netdev_priv *np = netdev_priv(netdev);
     struct idpf_vport_user_config_data *user_config;
+    struct idpf_vport_config *vport_config;
     struct idpf_fsteer_fltr *f;
     struct idpf_vport *vport;
     unsigned int cnt = 0;
@@ -44,7 +45,8 @@ static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
 
     idpf_vport_ctrl_lock(netdev);
     vport = idpf_netdev_to_vport(netdev);
-    user_config = &np->adapter->vport_config[np->vport_idx]->user_config;
+    vport_config = np->adapter->vport_config[np->vport_idx];
+    user_config = &vport_config->user_config;
 
     switch (cmd->cmd) {
     case ETHTOOL_GRXCLSRLCNT:
@@ -52,26 +54,34 @@ static int idpf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
         cmd->data = idpf_fsteer_max_rules(vport);
         break;
     case ETHTOOL_GRXCLSRULE:
-        err = -EINVAL;
+        err = -ENOENT;
+        spin_lock_bh(&vport_config->flow_steer_list_lock);
         list_for_each_entry(f, &user_config->flow_steer_list, list)
-            if (f->loc == cmd->fs.location) {
-                cmd->fs.ring_cookie = f->q_index;
+            if (f->fs.location == cmd->fs.location) {
+                /* Avoid infoleak from padding: zero first,
+                 * then assign fields
+                 */
+                memset(&cmd->fs, 0, sizeof(cmd->fs));
+                cmd->fs = f->fs;
                 err = 0;
                 break;
             }
+        spin_unlock_bh(&vport_config->flow_steer_list_lock);
         break;
     case ETHTOOL_GRXCLSRLALL:
         cmd->data = idpf_fsteer_max_rules(vport);
+        spin_lock_bh(&vport_config->flow_steer_list_lock);
         list_for_each_entry(f, &user_config->flow_steer_list, list) {
             if (cnt == cmd->rule_cnt) {
                 err = -EMSGSIZE;
                 break;
             }
-            rule_locs[cnt] = f->loc;
+            rule_locs[cnt] = f->fs.location;
             cnt++;
         }
         if (!err)
             cmd->rule_cnt = user_config->num_fsteer_fltrs;
+        spin_unlock_bh(&vport_config->flow_steer_list_lock);
         break;
     default:
         break;
@@ -168,7 +178,7 @@ static int idpf_add_flow_steer(struct net_device *netdev,
     struct idpf_vport *vport;
     u32 flow_type, q_index;
     u16 num_rxq;
-    int err;
+    int err = 0;
 
     vport = idpf_netdev_to_vport(netdev);
     vport_config = vport->adapter->vport_config[np->vport_idx];
@@ -194,6 +204,29 @@ static int idpf_add_flow_steer(struct net_device *netdev,
     if (!rule)
         return -ENOMEM;
 
+    fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
+    if (!fltr) {
+        err = -ENOMEM;
+        goto out_free_rule;
+    }
+
+    /* detect duplicate entry and reject before adding rules */
+    spin_lock_bh(&vport_config->flow_steer_list_lock);
+    list_for_each_entry(f, &user_config->flow_steer_list, list) {
+        if (f->fs.location == fsp->location) {
+            err = -EEXIST;
+            break;
+        }
+
+        if (f->fs.location > fsp->location)
+            break;
+        parent = f;
+    }
+    spin_unlock_bh(&vport_config->flow_steer_list_lock);
+
+    if (err)
+        goto out;
+
     rule->vport_id = cpu_to_le32(vport->vport_id);
     rule->count = cpu_to_le32(1);
     info = &rule->rule_info[0];
@@ -232,26 +265,20 @@ static int idpf_add_flow_steer(struct net_device *netdev,
         goto out;
     }
 
-    fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
-    if (!fltr) {
-        err = -ENOMEM;
-        goto out;
-    }
+    /* Save a copy of the user's flow spec so ethtool can later retrieve it */
+    fltr->fs = *fsp;
 
-    fltr->loc = fsp->location;
-    fltr->q_index = q_index;
-    list_for_each_entry(f, &user_config->flow_steer_list, list) {
-        if (f->loc >= fltr->loc)
-            break;
-        parent = f;
-    }
-
+    spin_lock_bh(&vport_config->flow_steer_list_lock);
     parent ? list_add(&fltr->list, &parent->list) :
          list_add(&fltr->list, &user_config->flow_steer_list);
 
     user_config->num_fsteer_fltrs++;
+    spin_unlock_bh(&vport_config->flow_steer_list_lock);
+    goto out_free_rule;
 
 out:
+    kfree(fltr);
+out_free_rule:
     kfree(rule);
     return err;
 }
@@ -302,17 +329,20 @@ static int idpf_del_flow_steer(struct net_device *netdev,
         goto out;
     }
 
+    spin_lock_bh(&vport_config->flow_steer_list_lock);
    list_for_each_entry_safe(f, iter,
                 &user_config->flow_steer_list, list) {
-        if (f->loc == fsp->location) {
+        if (f->fs.location == fsp->location) {
            list_del(&f->list);
            kfree(f);
            user_config->num_fsteer_fltrs--;
-            goto out;
+            goto out_unlock;
        }
    }
-    err = -EINVAL;
+    err = -ENOENT;
 
+out_unlock:
+    spin_unlock_bh(&vport_config->flow_steer_list_lock);
 out:
     kfree(rule);
     return err;
@@ -381,7 +411,10 @@ static u32 idpf_get_rxfh_indir_size(struct net_device *netdev)
  * @netdev: network interface device structure
  * @rxfh: pointer to param struct (indir, key, hfunc)
  *
- * Reads the indirection table directly from the hardware. Always returns 0.
+ * RSS LUT and Key information are read from driver's cached
+ * copy. When rxhash is off, rss lut will be displayed as zeros.
+ *
+ * Return: 0 on success, -errno otherwise.
  */
 static int idpf_get_rxfh(struct net_device *netdev,
              struct ethtool_rxfh_param *rxfh)
@@ -389,10 +422,13 @@ static int idpf_get_rxfh(struct net_device *netdev,
     struct idpf_netdev_priv *np = netdev_priv(netdev);
     struct idpf_rss_data *rss_data;
     struct idpf_adapter *adapter;
+    struct idpf_vport *vport;
+    bool rxhash_ena;
     int err = 0;
     u16 i;
 
     idpf_vport_ctrl_lock(netdev);
+    vport = idpf_netdev_to_vport(netdev);
 
     adapter = np->adapter;
 
@@ -402,9 +438,8 @@ static int idpf_get_rxfh(struct net_device *netdev,
     }
 
     rss_data = &adapter->vport_config[np->vport_idx]->user_config.rss_data;
-    if (!test_bit(IDPF_VPORT_UP, np->state))
-        goto unlock_mutex;
 
+    rxhash_ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
     rxfh->hfunc = ETH_RSS_HASH_TOP;
 
     if (rxfh->key)
@@ -412,7 +447,7 @@ static int idpf_get_rxfh(struct net_device *netdev,
 
     if (rxfh->indir) {
         for (i = 0; i < rss_data->rss_lut_size; i++)
-            rxfh->indir[i] = rss_data->rss_lut[i];
+            rxfh->indir[i] = rxhash_ena ? rss_data->rss_lut[i] : 0;
     }
 
 unlock_mutex:
@@ -452,8 +487,6 @@ static int idpf_set_rxfh(struct net_device *netdev,
     }
 
     rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
-    if (!test_bit(IDPF_VPORT_UP, np->state))
-        goto unlock_mutex;
 
     if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
         rxfh->hfunc != ETH_RSS_HASH_TOP) {
@@ -469,7 +502,8 @@ static int idpf_set_rxfh(struct net_device *netdev,
             rss_data->rss_lut[lut] = rxfh->indir[lut];
     }
 
-    err = idpf_config_rss(vport);
+    if (test_bit(IDPF_VPORT_UP, np->state))
+        err = idpf_config_rss(vport);
 
 unlock_mutex:
     idpf_vport_ctrl_unlock(netdev);
@@ -322,7 +322,7 @@ static void idpf_idc_vport_dev_down(struct idpf_adapter *adapter)
     for (i = 0; i < adapter->num_alloc_vports; i++) {
         struct idpf_vport *vport = adapter->vports[i];
 
-        if (!vport)
+        if (!vport || !vport->vdev_info)
             continue;
 
         idpf_unplug_aux_dev(vport->vdev_info->adev);
@@ -442,6 +442,29 @@ send_dealloc_vecs:
     return err;
 }
 
+/**
+ * idpf_del_all_flow_steer_filters - Delete all flow steer filters in list
+ * @vport: main vport struct
+ *
+ * Takes flow_steer_list_lock spinlock. Deletes all filters
+ */
+static void idpf_del_all_flow_steer_filters(struct idpf_vport *vport)
+{
+    struct idpf_vport_config *vport_config;
+    struct idpf_fsteer_fltr *f, *ftmp;
+
+    vport_config = vport->adapter->vport_config[vport->idx];
+
+    spin_lock_bh(&vport_config->flow_steer_list_lock);
+    list_for_each_entry_safe(f, ftmp, &vport_config->user_config.flow_steer_list,
+                 list) {
+        list_del(&f->list);
+        kfree(f);
+    }
+    vport_config->user_config.num_fsteer_fltrs = 0;
+    spin_unlock_bh(&vport_config->flow_steer_list_lock);
+}
+
 /**
  * idpf_find_mac_filter - Search filter list for specific mac filter
  * @vconfig: Vport config structure
@@ -729,6 +752,65 @@ static int idpf_init_mac_addr(struct idpf_vport *vport,
     return 0;
 }
 
+static void idpf_detach_and_close(struct idpf_adapter *adapter)
+{
+    int max_vports = adapter->max_vports;
+
+    for (int i = 0; i < max_vports; i++) {
+        struct net_device *netdev = adapter->netdevs[i];
+
+        /* If the interface is in detached state, that means the
+         * previous reset was not handled successfully for this
+         * vport.
+         */
+        if (!netif_device_present(netdev))
+            continue;
+
+        /* Hold RTNL to protect racing with callbacks */
+        rtnl_lock();
+        netif_device_detach(netdev);
+        if (netif_running(netdev)) {
+            set_bit(IDPF_VPORT_UP_REQUESTED,
+                adapter->vport_config[i]->flags);
+            dev_close(netdev);
+        }
+        rtnl_unlock();
+    }
+}
+
+static void idpf_attach_and_open(struct idpf_adapter *adapter)
+{
+    int max_vports = adapter->max_vports;
+
+    for (int i = 0; i < max_vports; i++) {
+        struct idpf_vport *vport = adapter->vports[i];
+        struct idpf_vport_config *vport_config;
+        struct net_device *netdev;
+
+        /* In case of a critical error in the init task, the vport
+         * will be freed. Only continue to restore the netdevs
+         * if the vport is allocated.
+         */
+        if (!vport)
+            continue;
+
+        /* No need for RTNL on attach as this function is called
+         * following detach and dev_close(). We do take RTNL for
+         * dev_open() below as it can race with external callbacks
+         * following the call to netif_device_attach().
+         */
+        netdev = adapter->netdevs[i];
+        netif_device_attach(netdev);
+        vport_config = adapter->vport_config[vport->idx];
+        if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED,
+                       vport_config->flags)) {
+            rtnl_lock();
+            dev_open(netdev, NULL);
+            rtnl_unlock();
+        }
+    }
+}
+
 /**
  * idpf_cfg_netdev - Allocate, configure and register a netdev
  * @vport: main vport structure
@@ -991,7 +1073,7 @@ static void idpf_vport_rel(struct idpf_vport *vport)
     u16 idx = vport->idx;
 
     vport_config = adapter->vport_config[vport->idx];
-    idpf_deinit_rss(vport);
+    idpf_deinit_rss_lut(vport);
     rss_data = &vport_config->user_config.rss_data;
     kfree(rss_data->rss_key);
     rss_data->rss_key = NULL;
@@ -1023,6 +1105,8 @@ static void idpf_vport_rel(struct idpf_vport *vport)
         kfree(adapter->vport_config[idx]->req_qs_chunks);
         adapter->vport_config[idx]->req_qs_chunks = NULL;
     }
+    kfree(vport->rx_ptype_lkup);
+    vport->rx_ptype_lkup = NULL;
     kfree(vport);
     adapter->num_alloc_vports--;
 }
@@ -1041,12 +1125,15 @@ static void idpf_vport_dealloc(struct idpf_vport *vport)
     idpf_idc_deinit_vport_aux_device(vport->vdev_info);
 
     idpf_deinit_mac_addr(vport);
-    idpf_vport_stop(vport, true);
 
-    if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags))
+    if (!test_bit(IDPF_HR_RESET_IN_PROG, adapter->flags)) {
+        idpf_vport_stop(vport, true);
         idpf_decfg_netdev(vport);
-    if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
+    }
+    if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
         idpf_del_all_mac_filters(vport);
+        idpf_del_all_flow_steer_filters(vport);
+    }
 
     if (adapter->netdevs[i]) {
         struct idpf_netdev_priv *np = netdev_priv(adapter->netdevs[i]);
@@ -1139,6 +1226,7 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
     u16 idx = adapter->next_vport;
     struct idpf_vport *vport;
     u16 num_max_q;
+    int err;
 
     if (idx == IDPF_NO_FREE_SLOT)
         return NULL;
@@ -1189,10 +1277,11 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
 
     idpf_vport_init(vport, max_q);
 
-    /* This alloc is done separate from the LUT because it's not strictly
-     * dependent on how many queues we have. If we change number of queues
-     * and soft reset we'll need a new LUT but the key can remain the same
-     * for as long as the vport exists.
+    /* LUT and key are both initialized here. Key is not strictly dependent
+     * on how many queues we have. If we change number of queues and soft
+     * reset is initiated, LUT will be freed and a new LUT will be allocated
+     * as per the updated number of queues during vport bringup. However,
+     * the key remains the same for as long as the vport exists.
      */
     rss_data = &adapter->vport_config[idx]->user_config.rss_data;
     rss_data->rss_key = kzalloc(rss_data->rss_key_size, GFP_KERNEL);
@@ -1202,6 +1291,11 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
     /* Initialize default rss key */
     netdev_rss_key_fill((void *)rss_data->rss_key, rss_data->rss_key_size);
 
+    /* Initialize default rss LUT */
+    err = idpf_init_rss_lut(vport);
+    if (err)
+        goto free_rss_key;
+
     /* fill vport slot in the adapter struct */
     adapter->vports[idx] = vport;
     adapter->vport_ids[idx] = idpf_get_vport_id(vport);
@@ -1212,6 +1306,8 @@ static struct idpf_vport *idpf_vport_alloc(struct idpf_adapter *adapter,
 
     return vport;
 
+free_rss_key:
+    kfree(rss_data->rss_key);
 free_vector_idxs:
     kfree(vport->q_vector_idxs);
 free_vport:
@@ -1388,7 +1484,6 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
 {
     struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
     struct idpf_adapter *adapter = vport->adapter;
-    struct idpf_vport_config *vport_config;
     int err;
 
     if (test_bit(IDPF_VPORT_UP, np->state))
@@ -1429,14 +1524,14 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
     if (err) {
         dev_err(&adapter->pdev->dev, "Failed to initialize queue registers for vport %u: %d\n",
             vport->vport_id, err);
-        goto queues_rel;
+        goto intr_deinit;
     }
 
     err = idpf_rx_bufs_init_all(vport);
     if (err) {
         dev_err(&adapter->pdev->dev, "Failed to initialize RX buffers for vport %u: %d\n",
             vport->vport_id, err);
-        goto queues_rel;
+        goto intr_deinit;
     }
 
     idpf_rx_init_buf_tail(vport);
@@ -1482,13 +1577,9 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
 
     idpf_restore_features(vport);
 
-    vport_config = adapter->vport_config[vport->idx];
-    if (vport_config->user_config.rss_data.rss_lut)
-        err = idpf_config_rss(vport);
-    else
-        err = idpf_init_rss(vport);
+    err = idpf_config_rss(vport);
     if (err) {
-        dev_err(&adapter->pdev->dev, "Failed to initialize RSS for vport %u: %d\n",
+        dev_err(&adapter->pdev->dev, "Failed to configure RSS for vport %u: %d\n",
             vport->vport_id, err);
         goto disable_vport;
     }
@@ -1497,7 +1588,7 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
     if (err) {
         dev_err(&adapter->pdev->dev, "Failed to complete interface up for vport %u: %d\n",
             vport->vport_id, err);
-        goto deinit_rss;
+        goto disable_vport;
     }
 
     if (rtnl)
@@ -1505,8 +1596,6 @@ static int idpf_vport_open(struct idpf_vport *vport, bool rtnl)
 
     return 0;
 
-deinit_rss:
-    idpf_deinit_rss(vport);
 disable_vport:
     idpf_send_disable_vport_msg(vport);
 disable_queues:
@@ -1544,7 +1633,6 @@ void idpf_init_task(struct work_struct *work)
     struct idpf_vport_config *vport_config;
     struct idpf_vport_max_q max_q;
     struct idpf_adapter *adapter;
-    struct idpf_netdev_priv *np;
     struct idpf_vport *vport;
     u16 num_default_vports;
     struct pci_dev *pdev;
@@ -1579,10 +1667,15 @@ void idpf_init_task(struct work_struct *work)
         goto unwind_vports;
     }
 
+    err = idpf_send_get_rx_ptype_msg(vport);
+    if (err)
+        goto unwind_vports;
+
     index = vport->idx;
     vport_config = adapter->vport_config[index];
 
     spin_lock_init(&vport_config->mac_filter_list_lock);
+    spin_lock_init(&vport_config->flow_steer_list_lock);
 
     INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
     INIT_LIST_HEAD(&vport_config->user_config.flow_steer_list);
@@ -1590,21 +1683,11 @@ void idpf_init_task(struct work_struct *work)
     err = idpf_check_supported_desc_ids(vport);
     if (err) {
         dev_err(&pdev->dev, "failed to get required descriptor ids\n");
-        goto cfg_netdev_err;
+        goto unwind_vports;
     }
 
     if (idpf_cfg_netdev(vport))
-        goto cfg_netdev_err;
-
-    err = idpf_send_get_rx_ptype_msg(vport);
-    if (err)
-        goto handle_err;
-
-    /* Once state is put into DOWN, driver is ready for dev_open */
-    np = netdev_priv(vport->netdev);
-    clear_bit(IDPF_VPORT_UP, np->state);
-    if (test_and_clear_bit(IDPF_VPORT_UP_REQUESTED, vport_config->flags))
-        idpf_vport_open(vport, true);
+        goto unwind_vports;
 
     /* Spawn and return 'idpf_init_task' work queue until all the
      * default vports are created
@@ -1635,21 +1718,15 @@ void idpf_init_task(struct work_struct *work)
         set_bit(IDPF_VPORT_REG_NETDEV, vport_config->flags);
     }
 
-    /* As all the required vports are created, clear the reset flag
-     * unconditionally here in case we were in reset and the link was down.
-     */
+    /* Clear the reset and load bits as all vports are created */
     clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
+    clear_bit(IDPF_HR_DRV_LOAD, adapter->flags);
     /* Start the statistics task now */
     queue_delayed_work(adapter->stats_wq, &adapter->stats_task,
                msecs_to_jiffies(10 * (pdev->devfn & 0x07)));
 
     return;
 
-handle_err:
-    idpf_decfg_netdev(vport);
-cfg_netdev_err:
-    idpf_vport_rel(vport);
-    adapter->vports[index] = NULL;
 unwind_vports:
     if (default_vport) {
         for (index = 0; index < adapter->max_vports; index++) {
@@ -1657,6 +1734,15 @@ unwind_vports:
             idpf_vport_dealloc(adapter->vports[index]);
         }
     }
+    /* Cleanup after vc_core_init, which has no way of knowing the
+     * init task failed on driver load.
+     */
+    if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
+        cancel_delayed_work_sync(&adapter->serv_task);
+        cancel_delayed_work_sync(&adapter->mbx_task);
+    }
+    idpf_ptp_release(adapter);
+
     clear_bit(IDPF_HR_RESET_IN_PROG, adapter->flags);
 }
 
@@ -1786,27 +1872,6 @@ static int idpf_check_reset_complete(struct idpf_hw *hw,
     return -EBUSY;
 }
 
-/**
- * idpf_set_vport_state - Set the vport state to be after the reset
- * @adapter: Driver specific private structure
- */
-static void idpf_set_vport_state(struct idpf_adapter *adapter)
-{
-    u16 i;
-
-    for (i = 0; i < adapter->max_vports; i++) {
-        struct idpf_netdev_priv *np;
-
-        if (!adapter->netdevs[i])
-            continue;
-
-        np = netdev_priv(adapter->netdevs[i]);
-        if (test_bit(IDPF_VPORT_UP, np->state))
-            set_bit(IDPF_VPORT_UP_REQUESTED,
-                adapter->vport_config[i]->flags);
-    }
-}
-
 /**
  * idpf_init_hard_reset - Initiate a hardware reset
  * @adapter: Driver specific private structure
@@ -1815,37 +1880,25 @@ static void idpf_set_vport_state(struct idpf_adapter *adapter)
  * reallocate. Also reinitialize the mailbox. Return 0 on success,
  * negative on failure.
  */
-static int idpf_init_hard_reset(struct idpf_adapter *adapter)
+static void idpf_init_hard_reset(struct idpf_adapter *adapter)
 {
     struct idpf_reg_ops *reg_ops = &adapter->dev_ops.reg_ops;
     struct device *dev = &adapter->pdev->dev;
-    struct net_device *netdev;
     int err;
-    u16 i;
 
+    idpf_detach_and_close(adapter);
     mutex_lock(&adapter->vport_ctrl_lock);
 
     dev_info(dev, "Device HW Reset initiated\n");
 
-    /* Avoid TX hangs on reset */
-    for (i = 0; i < adapter->max_vports; i++) {
-        netdev = adapter->netdevs[i];
-        if (!netdev)
-            continue;
-
-        netif_carrier_off(netdev);
-        netif_tx_disable(netdev);
-    }
-
     /* Prepare for reset */
-    if (test_and_clear_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
+    if (test_bit(IDPF_HR_DRV_LOAD, adapter->flags)) {
         reg_ops->trigger_reset(adapter, IDPF_HR_DRV_LOAD);
     } else if (test_and_clear_bit(IDPF_HR_FUNC_RESET, adapter->flags)) {
         bool is_reset = idpf_is_reset_detected(adapter);
 
         idpf_idc_issue_reset_event(adapter->cdev_info);
 
-        idpf_set_vport_state(adapter);
         idpf_vc_core_deinit(adapter);
         if (!is_reset)
             reg_ops->trigger_reset(adapter, IDPF_HR_FUNC_RESET);
@@ -1892,11 +1945,14 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
 unlock_mutex:
     mutex_unlock(&adapter->vport_ctrl_lock);
 
-    /* Wait until all vports are created to init RDMA CORE AUX */
-    if (!err)
-        err = idpf_idc_init(adapter);
-
-    return err;
+    /* Attempt to restore netdevs and initialize RDMA CORE AUX device,
+     * provided vc_core_init succeeded. It is still possible that
+     * vports are not allocated at this point if the init task failed.
+     */
+    if (!err) {
+        idpf_attach_and_open(adapter);
+        idpf_idc_init(adapter);
+    }
 }
 
 /**
@@ -1997,7 +2053,6 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
         idpf_vport_stop(vport, false);
     }
 
-    idpf_deinit_rss(vport);
     /* We're passing in vport here because we need its wait_queue
      * to send a message and it should be getting all the vport
      * config data out of the adapter but we need to be careful not
@@ -2023,6 +2078,10 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
     if (err)
         goto err_open;
 
+    if (reset_cause == IDPF_SR_Q_CHANGE &&
+        !netif_is_rxfh_configured(vport->netdev))
+        idpf_fill_dflt_rss_lut(vport);
+
     if (vport_is_up)
         err = idpf_vport_open(vport, false);
 
@@ -2165,40 +2224,6 @@ static void idpf_set_rx_mode(struct net_device *netdev)
             dev_err(dev, "Failed to set promiscuous mode: %d\n", err);
 }
 
-/**
- * idpf_vport_manage_rss_lut - disable/enable RSS
- * @vport: the vport being changed
- *
- * In the event of disable request for RSS, this function will zero out RSS
- * LUT, while in the event of enable request for RSS, it will reconfigure RSS
- * LUT with the default LUT configuration.
- */
-static int idpf_vport_manage_rss_lut(struct idpf_vport *vport)
-{
-    bool ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
-    struct idpf_rss_data *rss_data;
-    u16 idx = vport->idx;
-    int lut_size;
-
-    rss_data = &vport->adapter->vport_config[idx]->user_config.rss_data;
-    lut_size = rss_data->rss_lut_size * sizeof(u32);
-
-    if (ena) {
-        /* This will contain the default or user configured LUT */
-        memcpy(rss_data->rss_lut, rss_data->cached_lut, lut_size);
-    } else {
-        /* Save a copy of the current LUT to be restored later if
-         * requested.
-         */
-        memcpy(rss_data->cached_lut, rss_data->rss_lut, lut_size);
-
-        /* Zero out the current LUT to disable */
-        memset(rss_data->rss_lut, 0, lut_size);
-    }
-
-    return idpf_config_rss(vport);
-}
-
 /**
  * idpf_set_features - set the netdev feature flags
  * @netdev: ptr to the netdev being adjusted
@@ -2224,10 +2249,19 @@ static int idpf_set_features(struct net_device *netdev,
     }
 
     if (changed & NETIF_F_RXHASH) {
+        struct idpf_netdev_priv *np = netdev_priv(netdev);
+
         netdev->features ^= NETIF_F_RXHASH;
-        err = idpf_vport_manage_rss_lut(vport);
-        if (err)
-            goto unlock_mutex;
+
+        /* If the interface is not up when changing the rxhash, update
+         * to the HW is skipped. The updated LUT will be committed to
+         * the HW when the interface is brought up.
+         */
+        if (test_bit(IDPF_VPORT_UP, np->state)) {
+            err = idpf_config_rss(vport);
+            if (err)
+                goto unlock_mutex;
+        }
     }
 
     if (changed & NETIF_F_GRO_HW) {
@@ -695,9 +695,10 @@ err:
 static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq)
 {
     struct libeth_fq fq = {
         .count = rxq->desc_count,
         .type = LIBETH_FQE_MTU,
+        .buf_len = IDPF_RX_MAX_BUF_SZ,
         .nid = idpf_q_vector_to_mem(rxq->q_vector),
     };
     int ret;
 
@@ -754,6 +755,7 @@ static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq,
         .truesize = bufq->truesize,
         .count = bufq->desc_count,
         .type = type,
+        .buf_len = IDPF_RX_MAX_BUF_SZ,
         .hsplit = idpf_queue_has(HSPLIT_EN, bufq),
         .xdp = idpf_xdp_enabled(bufq->q_vector->vport),
         .nid = idpf_q_vector_to_mem(bufq->q_vector),
@@ -4641,7 +4643,7 @@ int idpf_config_rss(struct idpf_vport *vport)
  * idpf_fill_dflt_rss_lut - Fill the indirection table with the default values
  * @vport: virtual port structure
  */
-static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
+void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
 {
     struct idpf_adapter *adapter = vport->adapter;
     u16 num_active_rxq = vport->num_rxq;
@@ -4650,57 +4652,47 @@ static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport)
 
     rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
 
-    for (i = 0; i < rss_data->rss_lut_size; i++) {
+    for (i = 0; i < rss_data->rss_lut_size; i++)
         rss_data->rss_lut[i] = i % num_active_rxq;
-        rss_data->cached_lut[i] = rss_data->rss_lut[i];
-    }
 }
 
 /**
- * idpf_init_rss - Allocate and initialize RSS resources
+ * idpf_init_rss_lut - Allocate and initialize RSS LUT
  * @vport: virtual port
 *
- * Return 0 on success, negative on failure
+ * Return: 0 on success, negative on failure
 */
-int idpf_init_rss(struct idpf_vport *vport)
+int idpf_init_rss_lut(struct idpf_vport *vport)
 {
     struct idpf_adapter *adapter = vport->adapter;
     struct idpf_rss_data *rss_data;
-    u32 lut_size;
 
     rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+    if (!rss_data->rss_lut) {
+        u32 lut_size;
 
        lut_size = rss_data->rss_lut_size * sizeof(u32);
        rss_data->rss_lut = kzalloc(lut_size, GFP_KERNEL);
        if (!rss_data->rss_lut)
            return -ENOMEM;
-
-    rss_data->cached_lut = kzalloc(lut_size, GFP_KERNEL);
-    if (!rss_data->cached_lut) {
-        kfree(rss_data->rss_lut);
-        rss_data->rss_lut = NULL;
-
-        return -ENOMEM;
    }
 
    /* Fill the default RSS lut values */
    idpf_fill_dflt_rss_lut(vport);
 
-    return idpf_config_rss(vport);
+    return 0;
 }
 
 /**
- * idpf_deinit_rss - Release RSS resources
+ * idpf_deinit_rss_lut - Release RSS LUT
  * @vport: virtual port
 */
-void idpf_deinit_rss(struct idpf_vport *vport)
+void idpf_deinit_rss_lut(struct idpf_vport *vport)
{
    struct idpf_adapter *adapter = vport->adapter;
    struct idpf_rss_data *rss_data;
 
    rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
-    kfree(rss_data->cached_lut);
-    rss_data->cached_lut = NULL;
    kfree(rss_data->rss_lut);
    rss_data->rss_lut = NULL;
}
@@ -101,6 +101,7 @@ do { \
     idx = 0; \
 } while (0)
 
+#define IDPF_RX_MAX_BUF_SZ (16384 - 128)
 #define IDPF_RX_BUF_STRIDE 32
 #define IDPF_RX_BUF_POST_STRIDE 16
 #define IDPF_LOW_WATERMARK 64
@@ -1085,9 +1086,10 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
 void idpf_vport_intr_deinit(struct idpf_vport *vport);
 int idpf_vport_intr_init(struct idpf_vport *vport);
 void idpf_vport_intr_ena(struct idpf_vport *vport);
+void idpf_fill_dflt_rss_lut(struct idpf_vport *vport);
 int idpf_config_rss(struct idpf_vport *vport);
-int idpf_init_rss(struct idpf_vport *vport);
-void idpf_deinit_rss(struct idpf_vport *vport);
+int idpf_init_rss_lut(struct idpf_vport *vport);
+void idpf_deinit_rss_lut(struct idpf_vport *vport);
 int idpf_rx_bufs_init_all(struct idpf_vport *vport);
 
 struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport,
@@ -2804,6 +2804,10 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
  * @vport: virtual port data structure
  * @get: flag to set or get rss look up table
  *
+ * When rxhash is disabled, RSS LUT will be configured with zeros. If rxhash
+ * is enabled, the LUT values stored in driver's soft copy will be used to setup
+ * the HW.
+ *
  * Returns 0 on success, negative on failure.
  */
 int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
@@ -2814,10 +2818,12 @@ int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
     struct idpf_rss_data *rss_data;
     int buf_size, lut_buf_size;
     ssize_t reply_sz;
+    bool rxhash_ena;
     int i;
 
     rss_data =
         &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
+    rxhash_ena = idpf_is_feature_ena(vport, NETIF_F_RXHASH);
     buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
     rl = kzalloc(buf_size, GFP_KERNEL);
     if (!rl)
@@ -2839,7 +2845,8 @@ int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
     } else {
         rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
         for (i = 0; i < rss_data->rss_lut_size; i++)
-            rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);
+            rl->lut[i] = rxhash_ena ?
+                     cpu_to_le32(rss_data->rss_lut[i]) : 0;
 
         xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
     }
@@ -3570,6 +3577,7 @@ init_failed:
  */
 void idpf_vc_core_deinit(struct idpf_adapter *adapter)
 {
+    struct idpf_hw *hw = &adapter->hw;
     bool remove_in_prog;
 
     if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
@@ -3593,6 +3601,9 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter)
 
     idpf_vport_params_buf_rel(adapter);
 
+    kfree(hw->lan_regs);
+    hw->lan_regs = NULL;
+
     kfree(adapter->vports);
     adapter->vports = NULL;
 