forked from luck/tmp_suning_uos_patched
habanalabs: rename restore to ctx_switch when appropriate
This patch only does renaming of certain variables and structure members, and their accompanying comments. This is done to better reflect the actions these variables and members represent. There is no functional change in this patch. Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
This commit is contained in:
parent
b2377e032f
commit
027d35d0b6
|
@ -601,7 +601,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
|
|||
void __user *chunks;
|
||||
u32 num_chunks;
|
||||
u64 cs_seq = ULONG_MAX;
|
||||
int rc, do_restore;
|
||||
int rc, do_ctx_switch;
|
||||
bool need_soft_reset = false;
|
||||
|
||||
if (hl_device_disabled_or_in_reset(hdev)) {
|
||||
|
@ -612,9 +612,9 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
|
|||
goto out;
|
||||
}
|
||||
|
||||
do_restore = atomic_cmpxchg(&ctx->thread_restore_token, 1, 0);
|
||||
do_ctx_switch = atomic_cmpxchg(&ctx->thread_ctx_switch_token, 1, 0);
|
||||
|
||||
if (do_restore || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
|
||||
if (do_ctx_switch || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
|
||||
long ret;
|
||||
|
||||
chunks = (void __user *)(uintptr_t)args->in.chunks_restore;
|
||||
|
@ -622,7 +622,7 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
|
|||
|
||||
mutex_lock(&hpriv->restore_phase_mutex);
|
||||
|
||||
if (do_restore) {
|
||||
if (do_ctx_switch) {
|
||||
rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
|
||||
if (rc) {
|
||||
dev_err_ratelimited(hdev->dev,
|
||||
|
@ -678,18 +678,18 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
|
|||
}
|
||||
}
|
||||
|
||||
ctx->thread_restore_wait_token = 1;
|
||||
} else if (!ctx->thread_restore_wait_token) {
|
||||
ctx->thread_ctx_switch_wait_token = 1;
|
||||
} else if (!ctx->thread_ctx_switch_wait_token) {
|
||||
u32 tmp;
|
||||
|
||||
rc = hl_poll_timeout_memory(hdev,
|
||||
(u64) (uintptr_t) &ctx->thread_restore_wait_token,
|
||||
(u64) (uintptr_t) &ctx->thread_ctx_switch_wait_token,
|
||||
jiffies_to_usecs(hdev->timeout_jiffies),
|
||||
&tmp);
|
||||
|
||||
if (rc || !tmp) {
|
||||
dev_err(hdev->dev,
|
||||
"restore phase hasn't finished in time\n");
|
||||
"context switch phase didn't finish in time\n");
|
||||
rc = -ETIMEDOUT;
|
||||
goto out;
|
||||
}
|
||||
|
|
|
@ -106,8 +106,8 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
|
|||
|
||||
ctx->cs_sequence = 1;
|
||||
spin_lock_init(&ctx->cs_lock);
|
||||
atomic_set(&ctx->thread_restore_token, 1);
|
||||
ctx->thread_restore_wait_token = 0;
|
||||
atomic_set(&ctx->thread_ctx_switch_token, 1);
|
||||
ctx->thread_ctx_switch_wait_token = 0;
|
||||
|
||||
if (is_kernel_ctx) {
|
||||
ctx->asid = HL_KERNEL_ASID_ID; /* KMD gets ASID 0 */
|
||||
|
|
|
@ -710,10 +710,10 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset,
|
|||
for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++)
|
||||
hl_cq_reset(hdev, &hdev->completion_queue[i]);
|
||||
|
||||
/* Make sure the setup phase for the user context will run again */
|
||||
/* Make sure the context switch phase will run again */
|
||||
if (hdev->user_ctx) {
|
||||
atomic_set(&hdev->user_ctx->thread_restore_token, 1);
|
||||
hdev->user_ctx->thread_restore_wait_token = 0;
|
||||
atomic_set(&hdev->user_ctx->thread_ctx_switch_token, 1);
|
||||
hdev->user_ctx->thread_ctx_switch_wait_token = 0;
|
||||
}
|
||||
|
||||
/* Finished tear-down, starting to re-initialize */
|
||||
|
|
|
@ -615,12 +615,13 @@ struct hl_va_range {
|
|||
* DRAM mapping.
|
||||
* @cs_lock: spinlock to protect cs_sequence.
|
||||
* @dram_phys_mem: amount of used physical DRAM memory by this context.
|
||||
* @thread_restore_token: token to prevent multiple threads of the same context
|
||||
* from running the restore phase. Only one thread
|
||||
* should run it.
|
||||
* @thread_restore_wait_token: token to prevent the threads that didn't run
|
||||
* the restore phase from moving to their execution
|
||||
* phase before the restore phase has finished.
|
||||
* @thread_ctx_switch_token: token to prevent multiple threads of the same
|
||||
* context from running the context switch phase.
|
||||
* Only a single thread should run it.
|
||||
* @thread_ctx_switch_wait_token: token to prevent the threads that didn't run
|
||||
* the context switch phase from moving to their
|
||||
* execution phase before the context switch phase
|
||||
* has finished.
|
||||
* @asid: context's unique address space ID in the device's MMU.
|
||||
*/
|
||||
struct hl_ctx {
|
||||
|
@ -640,8 +641,8 @@ struct hl_ctx {
|
|||
u64 *dram_default_hops;
|
||||
spinlock_t cs_lock;
|
||||
atomic64_t dram_phys_mem;
|
||||
atomic_t thread_restore_token;
|
||||
u32 thread_restore_wait_token;
|
||||
atomic_t thread_ctx_switch_token;
|
||||
u32 thread_ctx_switch_wait_token;
|
||||
u32 asid;
|
||||
};
|
||||
|
||||
|
|
Loading…
Reference in New Issue
Block a user