tls: Fix write space handling
TLS device offload cannot use the SW context. This patch restores the
original tls_device write space handler and moves the SW- and
device-specific portions into the relevant files.

Also, remove the write_space call from the tls_sw flow: partial records
are already handled by its delayed tx work handler.
Fixes: a42055e8d2 ("net/tls: Add support for async encryption of records for performance")
Signed-off-by: Boris Pismenny <borisp@mellanox.com>
Reviewed-by: Eran Ben Elisha <eranbe@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 7463d3a2db (parent 94850257cf)
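For context, net/tls interposes on the socket's write-space callback when
the TLS ULP is attached; tls_write_space() (reworked below) is that
dispatcher. A minimal sketch of the wiring, paraphrased from tls_init()
in net/tls/tls_main.c (the helper name tls_hook_write_space is
hypothetical, not the verbatim kernel code, and the exact location has
moved between kernel versions):

/* Sketch: how the TLS ULP takes over the socket's write-space hook.
 * Paraphrased for illustration only; see tls_init() in
 * net/tls/tls_main.c for the real code.
 */
static void tls_hook_write_space(struct sock *sk, struct tls_context *ctx)
{
	/* remember TCP's handler so TLS can chain back to it ... */
	ctx->sk_write_space = sk->sk_write_space;
	/* ... and install the TLS dispatcher in its place */
	sk->sk_write_space = tls_write_space;
}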
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -519,6 +519,9 @@ static inline bool tls_sw_has_ctx_tx(const struct sock *sk)
 	return !!tls_sw_ctx_tx(ctx);
 }
 
+void tls_sw_write_space(struct sock *sk, struct tls_context *ctx);
+void tls_device_write_space(struct sock *sk, struct tls_context *ctx);
+
 static inline struct tls_offload_context_rx *
 tls_offload_ctx_rx(const struct tls_context *tls_ctx)
 {
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -546,6 +546,23 @@ static int tls_device_push_pending_record(struct sock *sk, int flags)
 	return tls_push_data(sk, &msg_iter, 0, flags, TLS_RECORD_TYPE_DATA);
 }
 
+void tls_device_write_space(struct sock *sk, struct tls_context *ctx)
+{
+	int rc = 0;
+
+	if (!sk->sk_write_pending && tls_is_partially_sent_record(ctx)) {
+		gfp_t sk_allocation = sk->sk_allocation;
+
+		sk->sk_allocation = GFP_ATOMIC;
+		rc = tls_push_partial_record(sk, ctx,
+					     MSG_DONTWAIT | MSG_NOSIGNAL);
+		sk->sk_allocation = sk_allocation;
+	}
+
+	if (!rc)
+		ctx->sk_write_space(sk);
+}
+
 void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
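The GFP_ATOMIC swap above matters because sk_write_space is typically
invoked from TCP's ACK processing in softirq context, where sleeping is
not allowed. Roughly where the callback fires, paraphrased and
simplified from tcp_new_space() in net/ipv4/tcp_input.c (not part of
this diff):

/* Paraphrase: when an ACK frees send-buffer space, TCP invokes the
 * socket's write-space callback -- tls_write_space() for kTLS sockets --
 * from softirq context, so that path must not sleep.
 */
static void tcp_new_space(struct sock *sk)
{
	if (tcp_should_expand_sndbuf(sk))
		tcp_sndbuf_expand(sk);

	sk->sk_write_space(sk);	/* -> tls_write_space() for kTLS */
}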
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -212,7 +212,6 @@ int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
 static void tls_write_space(struct sock *sk)
 {
 	struct tls_context *ctx = tls_get_ctx(sk);
-	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
 
 	/* If in_tcp_sendpages call lower protocol write space handler
 	 * to ensure we wake up any waiting operations there. For example
@@ -223,14 +222,12 @@ static void tls_write_space(struct sock *sk)
 		return;
 	}
 
-	/* Schedule the transmission if tx list is ready */
-	if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) {
-		/* Schedule the transmission */
-		if (!test_and_set_bit(BIT_TX_SCHEDULED, &tx_ctx->tx_bitmask))
-			schedule_delayed_work(&tx_ctx->tx_work.work, 0);
-	}
-
-	ctx->sk_write_space(sk);
+#ifdef CONFIG_TLS_DEVICE
+	if (ctx->tx_conf == TLS_HW)
+		tls_device_write_space(sk, ctx);
+	else
+#endif
+		tls_sw_write_space(sk, ctx);
 }
 
 static void tls_ctx_free(struct tls_context *ctx)
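For orientation, the tx_conf dispatch above is invisible to applications:
userspace enables kTLS the same way in both cases, and tx_conf becomes
TLS_HW only if a capable NIC accepts the offload. A minimal sketch of
enabling TX-side kTLS (error handling and key negotiation omitted; the
helper name enable_ktls_tx is hypothetical, and older libcs may lack the
SOL_TLS define, hence the fallback):

#include <string.h>
#include <sys/socket.h>
#include <netinet/tcp.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282	/* fallback for older userspace headers */
#endif

/* Enable kernel-TLS transmit on a connected TCP socket.  Whether the
 * kernel then uses the SW path or the device (TLS_HW) path is decided
 * inside the kernel, not here.
 */
static int enable_ktls_tx(int fd,
			  const struct tls12_crypto_info_aes_gcm_128 *ci)
{
	/* attach the TLS upper-layer protocol ... */
	if (setsockopt(fd, SOL_TCP, TCP_ULP, "tls", sizeof("tls")) < 0)
		return -1;
	/* ... then hand the negotiated TX key material to the kernel */
	return setsockopt(fd, SOL_TLS, TLS_TX, ci, sizeof(*ci));
}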
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -2126,6 +2126,19 @@ static void tx_work_handler(struct work_struct *work)
 	release_sock(sk);
 }
 
+void tls_sw_write_space(struct sock *sk, struct tls_context *ctx)
+{
+	struct tls_sw_context_tx *tx_ctx = tls_sw_ctx_tx(ctx);
+
+	/* Schedule the transmission if tx list is ready */
+	if (is_tx_ready(tx_ctx) && !sk->sk_write_pending) {
+		/* Schedule the transmission */
+		if (!test_and_set_bit(BIT_TX_SCHEDULED,
+				      &tx_ctx->tx_bitmask))
+			schedule_delayed_work(&tx_ctx->tx_work.work, 0);
+	}
+}
+
 int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
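This is why the sw flow no longer needs a direct write_space push:
tls_sw_write_space() only schedules tx_work, and the work handler (the
context line at the top of the hunk above) drains the queued records,
partial ones included, under the socket lock. Roughly, paraphrased and
simplified from tx_work_handler() in net/tls/tls_sw.c:

/* Paraphrase of tx_work_handler(): the delayed work scheduled by
 * tls_sw_write_space() takes the socket lock and transmits whatever
 * encrypted records are pending, so no extra push is needed from the
 * write-space path.
 */
static void tx_work_handler(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct tx_work *tx_work = container_of(delayed_work,
					       struct tx_work, work);
	struct sock *sk = tx_work->sk;
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx);

	if (!test_and_clear_bit(BIT_TX_SCHEDULED, &ctx->tx_bitmask))
		return;

	lock_sock(sk);
	tls_tx_records(sk, -1);		/* flush the pending record list */
	release_sock(sk);
}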