Merge branch 's390-qeth-next'

Julian Wiedmann says:

====================
s390/qeth: updates 2020-08-27

please apply the following patch series for qeth to netdev's net-next tree.

Patch 8 makes some improvements to how we handle HW address events,
avoiding some uncertainty around processing stale events after we
switched off the feature.
Except for that it's all straight-forward cleanups.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller <davem@davemloft.net>  2020-08-27 07:42:04 -07:00
commit 44771ea574
6 changed files with 100 additions and 113 deletions
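As background for the HW-address-event rework in patch 8: the fix boils down to a gate-and-drain pattern. A pnso_mode field records whether notifications are active, the event path drops incoming events once the mode is QETH_PNSO_NONE, and switching the feature off first clears the mode and then drains the event workqueue, so no worker can still be processing a stale event once the disable path returns. Below is a minimal userspace sketch of that ordering (hypothetical names, pthreads standing in for the kernel workqueue; not the driver's actual code):

/* gate_and_drain.c - userspace sketch of the stale-event pattern in patch 8.
 * All names here are hypothetical; the driver uses card->info.pnso_mode and
 * drain_workqueue() instead. Build with: cc -pthread gate_and_drain.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum pnso_mode { PNSO_NONE, PNSO_BRIDGEPORT };

static atomic_int mode = PNSO_NONE;	/* the "gate" */
static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t q_empty = PTHREAD_COND_INITIALIZER;
static int pending;			/* events queued but not yet processed */

/* Event path: drop events once the gate is closed. */
static void event_arrived(void)
{
	if (atomic_load(&mode) == PNSO_NONE)
		return;
	pthread_mutex_lock(&q_lock);
	pending++;
	pthread_mutex_unlock(&q_lock);
}

/* Worker path: process one event, wake the drainer when idle. */
static void process_one_event(void)
{
	pthread_mutex_lock(&q_lock);
	if (pending > 0 && --pending == 0)
		pthread_cond_signal(&q_empty);
	pthread_mutex_unlock(&q_lock);
}

/* Disable path: close the gate first, then drain. */
static void feature_disable(void)
{
	atomic_store(&mode, PNSO_NONE);
	pthread_mutex_lock(&q_lock);
	while (pending > 0)
		pthread_cond_wait(&q_empty, &q_lock);
	pthread_mutex_unlock(&q_lock);
}

int main(void)
{
	atomic_store(&mode, PNSO_BRIDGEPORT);
	event_arrived();	/* accepted: gate is open */
	process_one_event();
	feature_disable();	/* after this, no event is in flight */
	event_arrived();	/* dropped: gate is closed */
	printf("pending after disable: %d\n", pending);
	return 0;
}

The ordering is the point: close the gate first, then drain, so an event that slipped past the gate finishes before the disable path returns — the same reason the patch only drains the event workqueue after clearing pnso_mode.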


@@ -420,12 +420,6 @@ struct qeth_qdio_out_buffer {
 struct qeth_card;
 
-enum qeth_out_q_states {
-	QETH_OUT_Q_UNLOCKED,
-	QETH_OUT_Q_LOCKED,
-	QETH_OUT_Q_LOCKED_FLUSH,
-};
-
 #define QETH_CARD_STAT_ADD(_c, _stat, _val) ((_c)->stats._stat += (_val))
 #define QETH_CARD_STAT_INC(_c, _stat) QETH_CARD_STAT_ADD(_c, _stat, 1)
@@ -486,12 +480,12 @@ struct qeth_qdio_out_q {
 	struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
 	struct qdio_outbuf_state *bufstates; /* convenience pointer */
 	struct qeth_out_q_stats stats;
+	spinlock_t lock;
 	u8 next_buf_to_fill;
 	u8 max_elements;
 	u8 queue_no;
 	u8 do_pack;
 	struct qeth_card *card;
-	atomic_t state;
 	/*
 	 * number of buffers that are currently filled (PRIMED)
 	 * -> these buffers are hardware-owned
@@ -680,6 +674,11 @@ struct qeth_card_blkt {
 	int inter_packet_jumbo;
 };
 
+enum qeth_pnso_mode {
+	QETH_PNSO_NONE,
+	QETH_PNSO_BRIDGEPORT,
+};
+
 #define QETH_BROADCAST_WITH_ECHO 0x01
 #define QETH_BROADCAST_WITHOUT_ECHO 0x02
 
 struct qeth_card_info {
@@ -696,6 +695,7 @@ struct qeth_card_info {
 	/* no bitfield, we take a pointer on these two: */
 	u8 has_lp2lp_cso_v6;
 	u8 has_lp2lp_cso_v4;
+	enum qeth_pnso_mode pnso_mode;
 	enum qeth_card_types type;
 	enum qeth_link_types link_type;
 	int broadcast_capable;


@@ -2702,6 +2702,7 @@ static int qeth_alloc_qdio_queues(struct qeth_card *card)
 		card->qdio.out_qs[i] = queue;
 		queue->card = card;
 		queue->queue_no = i;
+		spin_lock_init(&queue->lock);
 		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);
 		queue->coalesce_usecs = QETH_TX_COALESCE_USECS;
 		queue->max_coalesced_frames = QETH_TX_MAX_COALESCED_FRAMES;
@@ -3068,7 +3069,6 @@ static int qeth_init_qdio_queues(struct qeth_card *card)
 		queue->bulk_max = qeth_tx_select_bulk_max(card, queue);
 		atomic_set(&queue->used_buffers, 0);
 		atomic_set(&queue->set_pci_flags_count, 0);
-		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
 		netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
 	}
 	return 0;
@@ -3549,8 +3549,9 @@ static unsigned int qeth_rx_refill_queue(struct qeth_card *card,
 
 static void qeth_buffer_reclaim_work(struct work_struct *work)
 {
-	struct qeth_card *card = container_of(work, struct qeth_card,
-					      buffer_reclaim_work.work);
+	struct qeth_card *card = container_of(to_delayed_work(work),
+					      struct qeth_card,
+					      buffer_reclaim_work);
 
 	local_bh_disable();
 	napi_schedule(&card->napi);
@@ -3740,37 +3741,31 @@ static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
 {
-	int index;
-	int flush_cnt = 0;
-	int q_was_packing = 0;
-
 	/*
 	 * check if weed have to switch to non-packing mode or if
 	 * we have to get a pci flag out on the queue
 	 */
 	if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
 	    !atomic_read(&queue->set_pci_flags_count)) {
-		if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
-		    QETH_OUT_Q_UNLOCKED) {
-			/*
-			 * If we get in here, there was no action in
-			 * do_send_packet. So, we check if there is a
-			 * packing buffer to be flushed here.
-			 */
-			index = queue->next_buf_to_fill;
-			q_was_packing = queue->do_pack;
-			/* queue->do_pack may change */
-			barrier();
-			flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
-			if (!flush_cnt &&
-			    !atomic_read(&queue->set_pci_flags_count))
-				flush_cnt += qeth_prep_flush_pack_buffer(queue);
+		unsigned int index, flush_cnt;
+		bool q_was_packing;
+
+		spin_lock(&queue->lock);
+
+		index = queue->next_buf_to_fill;
+		q_was_packing = queue->do_pack;
+
+		flush_cnt = qeth_switch_to_nonpacking_if_needed(queue);
+		if (!flush_cnt && !atomic_read(&queue->set_pci_flags_count))
+			flush_cnt = qeth_prep_flush_pack_buffer(queue);
+
+		if (flush_cnt) {
+			qeth_flush_buffers(queue, index, flush_cnt);
 			if (q_was_packing)
 				QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
-			if (flush_cnt)
-				qeth_flush_buffers(queue, index, flush_cnt);
-			atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
 		}
+
+		spin_unlock(&queue->lock);
 	}
 }
@@ -4282,29 +4277,22 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 			unsigned int offset, unsigned int hd_len,
 			int elements_needed)
 {
+	unsigned int start_index = queue->next_buf_to_fill;
 	struct qeth_qdio_out_buffer *buffer;
 	unsigned int next_element;
 	struct netdev_queue *txq;
 	bool stopped = false;
-	int start_index;
 	int flush_count = 0;
 	int do_pack = 0;
-	int tmp;
 	int rc = 0;
 
-	/* spin until we get the queue ... */
-	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
-			      QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
-	start_index = queue->next_buf_to_fill;
 	buffer = queue->bufs[queue->next_buf_to_fill];
 
 	/* Just a sanity check, the wake/stop logic should ensure that we always
 	 * get a free buffer.
 	 */
-	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
-		atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
+	if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
 		return -EBUSY;
-	}
 
 	txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
@@ -4327,8 +4315,6 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 		    QETH_QDIO_BUF_EMPTY) {
 			qeth_flush_buffers(queue, start_index,
 					   flush_count);
-			atomic_set(&queue->state,
-				   QETH_OUT_Q_UNLOCKED);
 			rc = -EBUSY;
 			goto out;
 		}
@@ -4360,31 +4346,8 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
 	if (flush_count)
 		qeth_flush_buffers(queue, start_index, flush_count);
-	else if (!atomic_read(&queue->set_pci_flags_count))
-		atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
-	/*
-	 * queue->state will go from LOCKED -> UNLOCKED or from
-	 * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
-	 * (switch packing state or flush buffer to get another pci flag out).
-	 * In that case we will enter this loop
-	 */
-	while (atomic_dec_return(&queue->state)) {
-		start_index = queue->next_buf_to_fill;
-		/* check if we can go back to non-packing state */
-		tmp = qeth_switch_to_nonpacking_if_needed(queue);
-		/*
-		 * check if we need to flush a packing buffer to get a pci
-		 * flag out on the queue
-		 */
-		if (!tmp && !atomic_read(&queue->set_pci_flags_count))
-			tmp = qeth_prep_flush_pack_buffer(queue);
-		if (tmp) {
-			qeth_flush_buffers(queue, start_index, tmp);
-			flush_count += tmp;
-		}
-	}
 out:
-	/* at this point the queue is UNLOCKED again */
 	if (do_pack)
 		QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
@@ -4458,8 +4421,10 @@ int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
 	} else {
 		/* TODO: drop skb_orphan() once TX completion is fast enough */
 		skb_orphan(skb);
+		spin_lock(&queue->lock);
 		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
 					 hd_len, elements);
+		spin_unlock(&queue->lock);
 	}
 
 	if (rc && !push_len)


@@ -719,15 +719,8 @@ struct qeth_sbp_port_entry {
 	struct net_if_token token;
 } __packed;
 
-struct qeth_sbp_query_ports {
-	__u8 primary_bp_supported;
-	__u8 secondary_bp_supported;
-	__u8 num_entries;
-	__u8 entry_length;
-	struct qeth_sbp_port_entry entry[];
-} __packed;
-
-struct qeth_sbp_state_change {
+/* For IPA_SBP_QUERY_BRIDGE_PORTS, IPA_SBP_BRIDGE_PORT_STATE_CHANGE */
+struct qeth_sbp_port_data {
 	__u8 primary_bp_supported;
 	__u8 secondary_bp_supported;
 	__u8 num_entries;
@@ -741,8 +734,7 @@ struct qeth_ipacmd_setbridgeport {
 	union {
 		struct qeth_sbp_query_cmds_supp query_cmds_supp;
 		struct qeth_sbp_set_primary set_primary;
-		struct qeth_sbp_query_ports query_ports;
-		struct qeth_sbp_state_change state_change;
+		struct qeth_sbp_port_data port_data;
 	} data;
 } __packed;


@@ -273,6 +273,17 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
 	return qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
 }
 
+static void qeth_l2_set_pnso_mode(struct qeth_card *card,
+				  enum qeth_pnso_mode mode)
+{
+	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
+	WRITE_ONCE(card->info.pnso_mode, mode);
+	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));
+
+	if (mode == QETH_PNSO_NONE)
+		drain_workqueue(card->event_wq);
+}
+
 static void qeth_l2_stop_card(struct qeth_card *card)
 {
 	QETH_CARD_TEXT(card, 2, "stopcard");
@@ -290,7 +301,7 @@ static void qeth_l2_stop_card(struct qeth_card *card)
 	qeth_qdio_clear_card(card, 0);
 	qeth_clear_working_pool_list(card);
-	flush_workqueue(card->event_wq);
+	qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
 	qeth_flush_local_addrs(card);
 	card->info.promisc_mode = 0;
 }
@@ -810,8 +821,6 @@ static void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card)
 	if (card->options.sbp.hostnotification) {
 		if (qeth_bridgeport_an_set(card, 1))
 			card->options.sbp.hostnotification = 0;
-	} else {
-		qeth_bridgeport_an_set(card, 0);
 	}
 }
@@ -1090,15 +1099,14 @@ static void qeth_bridge_emit_host_event(struct qeth_card *card,
 struct qeth_bridge_state_data {
 	struct work_struct worker;
 	struct qeth_card *card;
-	struct qeth_sbp_state_change qports;
+	u8 role;
+	u8 state;
 };
 
 static void qeth_bridge_state_change_worker(struct work_struct *work)
 {
 	struct qeth_bridge_state_data *data =
 		container_of(work, struct qeth_bridge_state_data, worker);
-	/* We are only interested in the first entry - local port */
-	struct qeth_sbp_port_entry *entry = &data->qports.entry[0];
 	char env_locrem[32];
 	char env_role[32];
 	char env_state[32];
@@ -1109,22 +1117,16 @@ static void qeth_bridge_state_change_worker(struct work_struct *work)
 		NULL
 	};
 
-	/* Role should not change by itself, but if it did, */
-	/* information from the hardware is authoritative. */
-	mutex_lock(&data->card->sbp_lock);
-	data->card->options.sbp.role = entry->role;
-	mutex_unlock(&data->card->sbp_lock);
-
 	snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
 	snprintf(env_role, sizeof(env_role), "ROLE=%s",
-		 (entry->role == QETH_SBP_ROLE_NONE) ? "none" :
-		 (entry->role == QETH_SBP_ROLE_PRIMARY) ? "primary" :
-		 (entry->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" :
+		 (data->role == QETH_SBP_ROLE_NONE) ? "none" :
+		 (data->role == QETH_SBP_ROLE_PRIMARY) ? "primary" :
+		 (data->role == QETH_SBP_ROLE_SECONDARY) ? "secondary" :
 		 "<INVALID>");
 	snprintf(env_state, sizeof(env_state), "STATE=%s",
-		 (entry->state == QETH_SBP_STATE_INACTIVE) ? "inactive" :
-		 (entry->state == QETH_SBP_STATE_STANDBY) ? "standby" :
-		 (entry->state == QETH_SBP_STATE_ACTIVE) ? "active" :
+		 (data->state == QETH_SBP_STATE_INACTIVE) ? "inactive" :
+		 (data->state == QETH_SBP_STATE_STANDBY) ? "standby" :
+		 (data->state == QETH_SBP_STATE_ACTIVE) ? "active" :
 		 "<INVALID>");
 	kobject_uevent_env(&data->card->gdev->dev.kobj,
 			   KOBJ_CHANGE, env);
@@ -1134,10 +1136,8 @@
 static void qeth_bridge_state_change(struct qeth_card *card,
 				     struct qeth_ipa_cmd *cmd)
 {
-	struct qeth_sbp_state_change *qports =
-		&cmd->data.sbp.data.state_change;
+	struct qeth_sbp_port_data *qports = &cmd->data.sbp.data.port_data;
 	struct qeth_bridge_state_data *data;
-	int extrasize;
 
 	QETH_CARD_TEXT(card, 2, "brstchng");
 	if (qports->num_entries == 0) {
@@ -1148,34 +1148,50 @@ static void qeth_bridge_state_change(struct qeth_card *card,
 		QETH_CARD_TEXT_(card, 2, "BPsz%04x", qports->entry_length);
 		return;
 	}
-	extrasize = sizeof(struct qeth_sbp_port_entry) * qports->num_entries;
-	data = kzalloc(sizeof(struct qeth_bridge_state_data) + extrasize,
-		       GFP_ATOMIC);
+
+	data = kzalloc(sizeof(*data), GFP_ATOMIC);
 	if (!data) {
 		QETH_CARD_TEXT(card, 2, "BPSalloc");
 		return;
 	}
 	INIT_WORK(&data->worker, qeth_bridge_state_change_worker);
 	data->card = card;
-	memcpy(&data->qports, qports,
-	       sizeof(struct qeth_sbp_state_change) + extrasize);
+	/* Information for the local port: */
+	data->role = qports->entry[0].role;
+	data->state = qports->entry[0].state;
 	queue_work(card->event_wq, &data->worker);
 }
 
 struct qeth_addr_change_data {
-	struct work_struct worker;
+	struct delayed_work dwork;
 	struct qeth_card *card;
 	struct qeth_ipacmd_addr_change ac_event;
 };
 
 static void qeth_addr_change_event_worker(struct work_struct *work)
 {
-	struct qeth_addr_change_data *data =
-		container_of(work, struct qeth_addr_change_data, worker);
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct qeth_addr_change_data *data;
+	struct qeth_card *card;
 	int i;
 
+	data = container_of(dwork, struct qeth_addr_change_data, dwork);
+	card = data->card;
+
 	QETH_CARD_TEXT(data->card, 4, "adrchgew");
+
+	if (READ_ONCE(card->info.pnso_mode) == QETH_PNSO_NONE)
+		goto free;
+
 	if (data->ac_event.lost_event_mask) {
+		/* Potential re-config in progress, try again later: */
+		if (!mutex_trylock(&card->sbp_lock)) {
+			queue_delayed_work(card->event_wq, dwork,
+					   msecs_to_jiffies(100));
+			return;
+		}
+
 		dev_info(&data->card->gdev->dev,
 			 "Address change notification stopped on %s (%s)\n",
 			 data->card->dev->name,
@@ -1184,8 +1200,9 @@ static void qeth_addr_change_event_worker(struct work_struct *work)
 			 : (data->ac_event.lost_event_mask == 0x02)
 			 ? "Bridge port state change"
 			 : "Unknown reason");
 
-		mutex_lock(&data->card->sbp_lock);
 		data->card->options.sbp.hostnotification = 0;
+		card->info.pnso_mode = QETH_PNSO_NONE;
 		mutex_unlock(&data->card->sbp_lock);
 		qeth_bridge_emit_host_event(data->card, anev_abort,
 					    0, NULL, NULL);
@@ -1199,6 +1216,8 @@ static void qeth_addr_change_event_worker(struct work_struct *work)
 				  &entry->token,
 				  &entry->addr_lnid);
 	}
+
+free:
 	kfree(data);
 }
@@ -1210,6 +1229,9 @@ static void qeth_addr_change_event(struct qeth_card *card,
 	struct qeth_addr_change_data *data;
 	int extrasize;
 
+	if (card->info.pnso_mode == QETH_PNSO_NONE)
+		return;
+
 	QETH_CARD_TEXT(card, 4, "adrchgev");
 	if (cmd->hdr.return_code != 0x0000) {
 		if (cmd->hdr.return_code == 0x0010) {
@@ -1229,11 +1251,11 @@ static void qeth_addr_change_event(struct qeth_card *card,
 		QETH_CARD_TEXT(card, 2, "ACNalloc");
 		return;
 	}
-	INIT_WORK(&data->worker, qeth_addr_change_event_worker);
+	INIT_DELAYED_WORK(&data->dwork, qeth_addr_change_event_worker);
 	data->card = card;
 	memcpy(&data->ac_event, hostevs,
 	       sizeof(struct qeth_ipacmd_addr_change) + extrasize);
-	queue_work(card->event_wq, &data->worker);
+	queue_delayed_work(card->event_wq, &data->dwork, 0);
 }
 
 /* SETBRIDGEPORT support; sending commands */
@@ -1418,8 +1440,8 @@ static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
 					  struct qeth_reply *reply, unsigned long data)
 {
 	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
-	struct qeth_sbp_query_ports *qports = &cmd->data.sbp.data.query_ports;
 	struct _qeth_sbp_cbctl *cbctl = (struct _qeth_sbp_cbctl *)reply->param;
+	struct qeth_sbp_port_data *qports;
 	int rc;
 
 	QETH_CARD_TEXT(card, 2, "brqprtcb");
@@ -1427,6 +1449,7 @@ static int qeth_bridgeport_query_ports_cb(struct qeth_card *card,
 	if (rc)
 		return rc;
 
+	qports = &cmd->data.sbp.data.port_data;
 	if (qports->entry_length != sizeof(struct qeth_sbp_port_entry)) {
 		QETH_CARD_TEXT_(card, 2, "SBPs%04x", qports->entry_length);
 		return -EINVAL;
@@ -1554,9 +1577,14 @@ int qeth_bridgeport_an_set(struct qeth_card *card, int enable)
 	if (enable) {
 		qeth_bridge_emit_host_event(card, anev_reset, 0, NULL, NULL);
+		qeth_l2_set_pnso_mode(card, QETH_PNSO_BRIDGEPORT);
 		rc = qeth_l2_pnso(card, 1, qeth_bridgeport_an_set_cb, card);
-	} else
+		if (rc)
+			qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+	} else {
 		rc = qeth_l2_pnso(card, 0, NULL, NULL);
+		qeth_l2_set_pnso_mode(card, QETH_PNSO_NONE);
+	}
 	return rc;
 }


@@ -157,6 +157,7 @@ static ssize_t qeth_bridgeport_hostnotification_store(struct device *dev,
 		rc = -EBUSY;
 	else if (qeth_card_hw_is_reachable(card)) {
 		rc = qeth_bridgeport_an_set(card, enable);
+		/* sbp_lock ensures ordering vs notifications-stopped events */
 		if (!rc)
 			card->options.sbp.hostnotification = enable;
 	} else


@@ -314,7 +314,8 @@ static int qeth_l3_setdelip_cb(struct qeth_card *card, struct qeth_reply *reply,
 }
 
 static int qeth_l3_send_setdelmc(struct qeth_card *card,
-				 struct qeth_ipaddr *addr, int ipacmd)
+				 struct qeth_ipaddr *addr,
+				 enum qeth_ipa_cmds ipacmd)
 {
 	struct qeth_cmd_buffer *iob;
 	struct qeth_ipa_cmd *cmd;