Skip to content

Commit

Permalink
Merge branch 's390-qeth-next'
Browse files Browse the repository at this point in the history
Julian Wiedmann says:

====================
s390/qeth: updates 2020-03-18

Please apply the following patch series for qeth to netdev's net-next
tree.

This consists of three parts:
1) support for __GFP_MEMALLOC,
2) several ethtool enhancements (.set_channels, SW Timestamping),
3) the usual cleanups.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
  • Loading branch information
David S. Miller committed Mar 18, 2020
2 parents a58741e + cd652be commit ce7964b
Show file tree
Hide file tree
Showing 6 changed files with 174 additions and 70 deletions.
7 changes: 2 additions & 5 deletions drivers/s390/net/qeth_core.h
Original file line number Diff line number Diff line change
Expand Up @@ -847,11 +847,6 @@ struct qeth_trap_id {
/*some helper functions*/
#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")

static inline bool qeth_netdev_is_registered(struct net_device *dev)
{
return dev->netdev_ops != NULL;
}

static inline u16 qeth_iqd_translate_txq(struct net_device *dev, u16 txq)
{
if (txq == QETH_IQD_MCAST_TXQ)
Expand Down Expand Up @@ -1053,13 +1048,15 @@ int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
void qeth_trace_features(struct qeth_card *);
int qeth_setassparms_cb(struct qeth_card *, struct qeth_reply *, unsigned long);
int qeth_setup_netdev(struct qeth_card *card);
int qeth_set_features(struct net_device *, netdev_features_t);
void qeth_enable_hw_features(struct net_device *dev);
netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
netdev_features_t qeth_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats);
int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count);
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
u8 cast_type, struct net_device *sb_dev);
int qeth_open(struct net_device *dev);
Expand Down
131 changes: 97 additions & 34 deletions drivers/s390/net/qeth_core_main.c
Original file line number Diff line number Diff line change
Expand Up @@ -244,7 +244,7 @@ static struct qeth_buffer_pool_entry *qeth_alloc_pool_entry(unsigned int pages)
return NULL;

for (i = 0; i < pages; i++) {
entry->elements[i] = alloc_page(GFP_KERNEL);
entry->elements[i] = __dev_alloc_page(GFP_KERNEL);

if (!entry->elements[i]) {
qeth_free_pool_entry(entry);
Expand Down Expand Up @@ -538,9 +538,10 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
for (i = 0;
i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
i++) {
if (aob->sba[i] && buffer->is_header[i])
kmem_cache_free(qeth_core_header_cache,
(void *) aob->sba[i]);
void *data = phys_to_virt(aob->sba[i]);

if (data && buffer->is_header[i])
kmem_cache_free(qeth_core_header_cache, data);
}
atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);

Expand Down Expand Up @@ -1244,26 +1245,29 @@ EXPORT_SYMBOL_GPL(qeth_drain_output_queues);

/* Set the number of output queues for an OSA device.
 *
 * @card:   the card whose net_device is adjusted.
 * @single: true to force a single output queue, false for the full set.
 *
 * The pasted text carried duplicate/stale diff lines (a second declaration
 * of `count`, stacked stale `if`s and a stale assignment) that would not
 * compile; this is the coherent version: `max` is the queue count exposed
 * to qdio, while the active count handed to the stack may be lower for
 * VM NICs (they keep their previously configured real_num_tx_queues).
 *
 * Returns 0 on success or the error from netif_set_real_num_tx_queues().
 */
static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
	unsigned int max = single ? 1 : card->dev->num_tx_queues;
	unsigned int count;
	int rc;

	count = IS_VM_NIC(card) ? min(max, card->dev->real_num_tx_queues) : max;

	rtnl_lock();
	rc = netif_set_real_num_tx_queues(card->dev, count);
	rtnl_unlock();

	if (rc)
		return rc;

	if (card->qdio.no_out_queues == max)
		return 0;

	if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
		qeth_free_qdio_queues(card);

	/* Only warn if the user actually configured prio-queueing: */
	if (max == 1 && card->qdio.do_prio_queueing != QETH_PRIOQ_DEFAULT)
		dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

	card->qdio.no_out_queues = max;
	return 0;
}

Expand Down Expand Up @@ -2654,7 +2658,7 @@ static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
struct qeth_buffer_pool_entry, list);
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
if (page_count(entry->elements[i]) > 1) {
struct page *page = alloc_page(GFP_ATOMIC);
struct page *page = dev_alloc_page();

if (!page)
return NULL;
Expand Down Expand Up @@ -3352,6 +3356,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,

for (i = index; i < index + count; ++i) {
unsigned int bidx = QDIO_BUFNR(i);
struct sk_buff *skb;

buf = queue->bufs[bidx];
buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
Expand All @@ -3360,8 +3365,11 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
if (queue->bufstates)
queue->bufstates[bidx].user = buf;

if (IS_IQD(queue->card))
if (IS_IQD(card)) {
skb_queue_walk(&buf->skb_list, skb)
skb_tx_timestamp(skb);
continue;
}

if (!queue->do_pack) {
if ((atomic_read(&queue->used_buffers) >=
Expand Down Expand Up @@ -3705,6 +3713,7 @@ static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
unsigned int hdr_len, unsigned int proto_len,
unsigned int *elements)
{
gfp_t gfp = GFP_ATOMIC | (skb_pfmemalloc(skb) ? __GFP_MEMALLOC : 0);
const unsigned int contiguous = proto_len ? proto_len : 1;
const unsigned int max_elements = queue->max_elements;
unsigned int __elements;
Expand Down Expand Up @@ -3760,10 +3769,11 @@ static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
*hdr = skb_push(skb, hdr_len);
return hdr_len;
}
/* fall back */

/* Fall back to cache element with known-good alignment: */
if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
return -E2BIG;
*hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
*hdr = kmem_cache_alloc(qeth_core_header_cache, gfp);
if (!*hdr)
return -ENOMEM;
/* Copy protocol headers behind HW header: */
Expand Down Expand Up @@ -5985,22 +5995,8 @@ static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
SET_NETDEV_DEV(dev, &card->gdev->dev);
netif_carrier_off(dev);

if (IS_OSN(card)) {
dev->ethtool_ops = &qeth_osn_ethtool_ops;
} else {
dev->ethtool_ops = &qeth_ethtool_ops;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->hw_features |= NETIF_F_SG;
dev->vlan_features |= NETIF_F_SG;
if (IS_IQD(card)) {
dev->features |= NETIF_F_SG;
if (netif_set_real_num_tx_queues(dev,
QETH_IQD_MIN_TXQ)) {
free_netdev(dev);
return NULL;
}
}
}
dev->ethtool_ops = IS_OSN(card) ? &qeth_osn_ethtool_ops :
&qeth_ethtool_ops;

return dev;
}
Expand All @@ -6016,6 +6012,28 @@ struct net_device *qeth_clone_netdev(struct net_device *orig)
return clone;
}

/* Apply the qeth-specific net_device settings and select the initial
 * number of active TX queues for this device type.
 *
 * @card: the qeth card whose net_device is being set up.
 *
 * Returns 0 on success or the error from qeth_set_real_num_tx_queues().
 */
int qeth_setup_netdev(struct qeth_card *card)
{
	struct net_device *dev = card->dev;
	unsigned int txq_count;

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	dev->hw_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_SG;

	if (IS_IQD(card)) {
		dev->features |= NETIF_F_SG;
		txq_count = QETH_IQD_MIN_TXQ;
	} else {
		txq_count = IS_VM_NIC(card) ? 1 : dev->real_num_tx_queues;
	}

	return qeth_set_real_num_tx_queues(card, txq_count);
}
EXPORT_SYMBOL_GPL(qeth_setup_netdev);

static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card;
Expand Down Expand Up @@ -6055,12 +6073,13 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
goto err_card;
}

qeth_determine_capabilities(card);
qeth_set_blkt_defaults(card);

card->qdio.no_out_queues = card->dev->num_tx_queues;
rc = qeth_update_from_chp_desc(card);
if (rc)
goto err_chp_desc;
qeth_determine_capabilities(card);
qeth_set_blkt_defaults(card);

enforced_disc = qeth_enforce_discipline(card);
switch (enforced_disc) {
Expand Down Expand Up @@ -6245,9 +6264,6 @@ int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
struct mii_ioctl_data *mii_data;
int rc = 0;

if (!card)
return -ENODEV;

switch (cmd) {
case SIOC_QETH_ADP_SET_SNMP_CONTROL:
rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
Expand Down Expand Up @@ -6627,12 +6643,59 @@ void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
}
EXPORT_SYMBOL_GPL(qeth_get_stats64);

#define TC_IQD_UCAST 0
/* Install a trivial prio->tc map that spans only the unicast queues.
 *
 * @dev:	the IQD net_device to configure.
 * @ucast_txqs:	number of active unicast TX queues.
 *
 * On IQD, mcast traffic must land on its dedicated queue; that part is
 * handled by qeth_iqd_select_queue(). For ucast traffic the queue choice
 * is left to the stack — by exposing a single traffic class that covers
 * only the ucast queues, the stack spreads ucast flows evenly without
 * ever picking the mcast queue.
 */
static void qeth_iqd_set_prio_tc_map(struct net_device *dev,
				     unsigned int ucast_txqs)
{
	unsigned int i;

	/* One traffic class over all active ucast queues: */
	netdev_set_num_tc(dev, 1);
	netdev_set_tc_queue(dev, TC_IQD_UCAST, ucast_txqs,
			    QETH_IQD_MIN_UCAST_TXQ);

	/* Every priority maps to that single class: */
	for (i = 0; i <= TC_BITMASK; i++)
		netdev_set_prio_tc_map(dev, i, TC_IQD_UCAST);
}

/* Change the number of active TX queues, keeping the IQD prio->tc map in
 * step with the queue count.
 *
 * @card:  the card whose net_device is adjusted.
 * @count: desired number of active TX queues (incl. the mcast queue on IQD).
 *
 * Returns 0 on success or the error from netif_set_real_num_tx_queues().
 */
int qeth_set_real_num_tx_queues(struct qeth_card *card, unsigned int count)
{
	struct net_device *dev = card->dev;
	int rc;

	/* Per netif_setup_tc(), the mapping must be adjusted first: */
	if (IS_IQD(card))
		qeth_iqd_set_prio_tc_map(dev, count - 1);

	rc = netif_set_real_num_tx_queues(dev, count);
	if (rc && IS_IQD(card)) {
		/* Roll the mapping back to the unchanged queue count: */
		qeth_iqd_set_prio_tc_map(dev, dev->real_num_tx_queues - 1);
	}

	return rc;
}

/* Pick the TX queue for a skb on an IQD device.
 *
 * @dev:	transmitting net_device.
 * @skb:	the packet being queued.
 * @cast_type:	RTN_* cast type of the destination.
 * @sb_dev:	subordinate device, passed through to netdev_pick_tx().
 *
 * The pasted text kept the removed line `return QETH_IQD_MIN_UCAST_TXQ;`
 * from the old version, which made the multi-queue selection below it
 * unreachable; that stale early return is dropped here.
 *
 * Mcast always goes to its dedicated queue. Ucast goes to the single
 * ucast queue when only the minimal queue set is active; otherwise the
 * stack's pick is used, redirected off the mcast queue if necessary.
 */
u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
			  u8 cast_type, struct net_device *sb_dev)
{
	u16 txq;

	if (cast_type != RTN_UNICAST)
		return QETH_IQD_MCAST_TXQ;

	if (dev->real_num_tx_queues == QETH_IQD_MIN_TXQ)
		return QETH_IQD_MIN_UCAST_TXQ;

	txq = netdev_pick_tx(dev, skb, sb_dev);
	return (txq == QETH_IQD_MCAST_TXQ) ? QETH_IQD_MIN_UCAST_TXQ : txq;
}
EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);

Expand Down
2 changes: 1 addition & 1 deletion drivers/s390/net/qeth_core_sys.c
Original file line number Diff line number Diff line change
Expand Up @@ -176,7 +176,7 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
int rc = 0;

if (IS_IQD(card))
if (IS_IQD(card) || IS_VM_NIC(card))
return -EOPNOTSUPP;

mutex_lock(&card->conf_mutex);
Expand Down
43 changes: 42 additions & 1 deletion drivers/s390/net/qeth_ethtool.c
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,6 @@ static void qeth_get_drvinfo(struct net_device *dev,

strlcpy(info->driver, IS_LAYER2(card) ? "qeth_l2" : "qeth_l3",
sizeof(info->driver));
strlcpy(info->version, "1.0", sizeof(info->version));
strlcpy(info->fw_version, card->info.mcl_level,
sizeof(info->fw_version));
snprintf(info->bus_info, sizeof(info->bus_info), "%s/%s/%s",
Expand All @@ -175,6 +174,46 @@ static void qeth_get_channels(struct net_device *dev,
channels->combined_count = 0;
}

/* ethtool .set_channels handler: validate and apply a new TX queue count.
 *
 * @dev:	the net_device being reconfigured.
 * @channels:	requested channel counts from ethtool.
 *
 * Returns 0 on success, -EINVAL/-EPERM/-EOPNOTSUPP on rejected requests,
 * or the error from qeth_set_real_num_tx_queues().
 */
static int qeth_set_channels(struct net_device *dev,
			     struct ethtool_channels *channels)
{
	struct qeth_card *card = dev->ml_priv;

	if (!channels->rx_count || !channels->tx_count)
		return -EINVAL;
	if (channels->tx_count > card->qdio.no_out_queues)
		return -EINVAL;

	if (!IS_IQD(card)) {
		/* OSA still uses the legacy prio-queue mechanism: */
		if (!IS_VM_NIC(card))
			return -EOPNOTSUPP;
		return qeth_set_real_num_tx_queues(card, channels->tx_count);
	}

	if (channels->tx_count < QETH_IQD_MIN_TXQ)
		return -EINVAL;

	/* Reject downgrade while running. It could push displaced
	 * ucast flows onto txq0, which is reserved for mcast.
	 */
	if (netif_running(dev) &&
	    channels->tx_count < dev->real_num_tx_queues)
		return -EPERM;

	return qeth_set_real_num_tx_queues(card, channels->tx_count);
}

/* ethtool .get_ts_info handler: report timestamping capabilities.
 * Only IQD devices report capabilities (via the generic helper); all
 * other device types return -EOPNOTSUPP.
 */
static int qeth_get_ts_info(struct net_device *dev,
			    struct ethtool_ts_info *info)
{
	struct qeth_card *card = dev->ml_priv;

	return IS_IQD(card) ? ethtool_op_get_ts_info(dev, info) : -EOPNOTSUPP;
}

static int qeth_get_tunable(struct net_device *dev,
const struct ethtool_tunable *tuna, void *data)
{
Expand Down Expand Up @@ -410,6 +449,8 @@ const struct ethtool_ops qeth_ethtool_ops = {
.get_sset_count = qeth_get_sset_count,
.get_drvinfo = qeth_get_drvinfo,
.get_channels = qeth_get_channels,
.set_channels = qeth_set_channels,
.get_ts_info = qeth_get_ts_info,
.get_tunable = qeth_get_tunable,
.set_tunable = qeth_set_tunable,
.get_link_ksettings = qeth_get_link_ksettings,
Expand Down
Loading

0 comments on commit ce7964b

Please sign in to comment.