commit 9bd42183b9
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull scheduler updates from Ingo Molnar:
 "The main changes in this cycle were:

   - Add the SYSTEM_SCHEDULING bootup state to move various scheduler
     debug checks earlier into the bootup. This turns silent and
     sporadically deadly bugs into nice, deterministic splats. Fix some
     of the splats that triggered. (Thomas Gleixner)

   - A round of restructuring and refactoring of the load-balancing and
     topology code (Peter Zijlstra)

   - Another round of consolidating ~20 years of incremental scheduler
     code history: this time in terms of wait-queue nomenclature. (I
     didn't get much feedback on these renaming patches, and we can
     still easily change any names I might have misplaced, so if anyone
     hates a new name, please holler and I'll fix it.) (Ingo Molnar)

   - sched/numa improvements, fixes and updates (Rik van Riel)

   - Another round of x86/tsc scheduler clock code improvements, in hope
     of making it more robust (Peter Zijlstra)

   - Improve NOHZ behavior (Frederic Weisbecker)

   - Deadline scheduler improvements and fixes (Luca Abeni, Daniel
     Bristot de Oliveira)

   - Simplify and optimize the topology setup code (Lauro Ramos Venancio)

   - Debloat and decouple scheduler code some more (Nicolas Pitre)

   - Simplify code by making better use of llist primitives (Byungchul Park)

   - ... plus other fixes and improvements"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (103 commits)
  sched/cputime: Refactor the cputime_adjust() code
  sched/debug: Expose the number of RT/DL tasks that can migrate
  sched/numa: Hide numa_wake_affine() from UP build
  sched/fair: Remove effective_load()
  sched/numa: Implement NUMA node level wake_affine()
  sched/fair: Simplify wake_affine() for the single socket case
  sched/numa: Override part of migrate_degrades_locality() when idle balancing
  sched/rt: Move RT related code from sched/core.c to sched/rt.c
  sched/deadline: Move DL related code from sched/core.c to sched/deadline.c
  sched/cpuset: Only offer CONFIG_CPUSETS if SMP is enabled
  sched/fair: Spare idle load balancing on nohz_full CPUs
  nohz: Move idle balancer registration to the idle path
  sched/loadavg: Generalize "_idle" naming to "_nohz"
  sched/core: Drop the unused try_get_task_struct() helper function
  sched/fair: WARN() and refuse to set buddy when !se->on_rq
  sched/debug: Fix SCHED_WARN_ON() to return a value on !CONFIG_SCHED_DEBUG as well
  sched/wait: Disambiguate wq_entry->task_list and wq_head->task_list naming
  sched/wait: Move bit_wait_table[] and related functionality from sched/core.c to sched/wait_bit.c
  sched/wait: Split out the wait_bit*() APIs from <linux/wait.h> into <linux/wait_bit.h>
  sched/wait: Re-adjust macro line continuation backslashes in <linux/wait.h>
  ...
@@ -819,7 +819,7 @@ printk(KERN_INFO "my ip: %pI4\n", &ipaddress);
     certain condition is true.  They must be used carefully to ensure
     there is no race condition.  You declare a
     <type>wait_queue_head_t</type>, and then processes which want to
-    wait for that condition declare a <type>wait_queue_t</type>
+    wait for that condition declare a <type>wait_queue_entry_t</type>
     referring to themselves, and place that in the queue.
   </para>

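As an aside, the usage pattern described in the paragraph above is compact enough to show in full. A minimal sketch (not taken from this commit; the condition flag and the two helper functions are made up for illustration, while the wait/wake calls are the stock kernel API):

    #include <linux/wait.h>
    #include <linux/sched.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_waitq);   /* the shared wait_queue_head_t */
    static int my_condition;                    /* illustrative condition flag */

    /* Waiter: wait_event_interruptible() builds a wait_queue_entry_t on its
     * own stack, queues it on my_waitq and sleeps until the condition holds. */
    static int wait_for_it(void)
    {
            return wait_event_interruptible(my_waitq, my_condition != 0);
    }

    /* Waker: make the condition true, then wake every entry on the head. */
    static void make_it_so(void)
    {
            my_condition = 1;
            wake_up(&my_waitq);
    }
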
@@ -316,7 +316,7 @@ For version 5, the format of the message is:

        struct autofs_v5_packet {
                int proto_version;                /* Protocol version */
                int type;                         /* Type of packet */
-               autofs_wqt_t wait_queue_token;
+               autofs_wqt_t wait_queue_entry_token;
                __u32 dev;
                __u64 ino;
                __u32 uid;
@@ -341,12 +341,12 @@ The pipe will be set to "packet mode" (equivalent to passing
`O_DIRECT`) to _pipe2(2)_ so that a read from the pipe will return at
most one packet, and any unread portion of a packet will be discarded.

-The `wait_queue_token` is a unique number which can identify a
+The `wait_queue_entry_token` is a unique number which can identify a
particular request to be acknowledged.  When a message is sent over
the pipe the affected dentry is marked as either "active" or
"expiring" and other accesses to it block until the message is
acknowledged using one of the ioctls below and the relevant
-`wait_queue_token`.
+`wait_queue_entry_token`.

Communicating with autofs: root directory ioctls
------------------------------------------------

@@ -358,7 +358,7 @@ capability, or must be the automount daemon.
The available ioctl commands are:

- **AUTOFS_IOC_READY**: a notification has been handled.  The argument
-  to the ioctl command is the "wait_queue_token" number
+  to the ioctl command is the "wait_queue_entry_token" number
  corresponding to the notification being acknowledged.
- **AUTOFS_IOC_FAIL**: similar to above, but indicates failure with
  the error code `ENOENT`.

@@ -382,14 +382,14 @@ The available ioctl commands are:

        struct autofs_packet_expire_multi {
                int proto_version;                /* Protocol version */
                int type;                         /* Type of packet */
-               autofs_wqt_t wait_queue_token;
+               autofs_wqt_t wait_queue_entry_token;
                int len;
                char name[NAME_MAX+1];
        };

  is required.  This is filled in with the name of something
  that can be unmounted or removed.  If nothing can be expired,
-  `errno` is set to `EAGAIN`.  Even though a `wait_queue_token`
+  `errno` is set to `EAGAIN`.  Even though a `wait_queue_entry_token`
  is present in the structure, no "wait queue" is established
  and no acknowledgment is needed.
- **AUTOFS_IOC_EXPIRE_MULTI**:  This is similar to

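To make the request/acknowledge cycle concrete, here is a rough sketch of the daemon side of this protocol (userspace C, heavily simplified: error handling is omitted, the two file descriptors are assumed to have been obtained from the mount as described elsewhere in this document, and the token field name follows the rename proposed in this series, so a build against unpatched headers would still spell it wait_queue_token):

    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/auto_fs4.h>

    /* pipefd: read end of the kernel-to-daemon pipe (packet mode)
     * rootfd: open fd on the root directory of the autofs mount    */
    static void handle_one_request(int pipefd, int rootfd)
    {
            struct autofs_v5_packet pkt;

            if (read(pipefd, &pkt, sizeof(pkt)) <= 0)
                    return;

            /* ... perform the requested mount or expire for pkt.name here ... */

            /* Acknowledge, quoting the token carried in the packet. */
            ioctl(rootfd, AUTOFS_IOC_READY, pkt.wait_queue_entry_token);
    }
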
@@ -7,6 +7,8 @@ CONTENTS
 0. WARNING
 1. Overview
 2. Scheduling algorithm
+  2.1 Main algorithm
+  2.2 Bandwidth reclaiming
 3. Scheduling Real-Time Tasks
   3.1 Definitions
   3.2 Schedulability Analysis for Uniprocessor Systems
@@ -44,6 +46,9 @@ CONTENTS
 2. Scheduling algorithm
 ==================

+2.1 Main algorithm
+------------------
+
 SCHED_DEADLINE uses three parameters, named "runtime", "period", and
 "deadline", to schedule tasks. A SCHED_DEADLINE task should receive
 "runtime" microseconds of execution time every "period" microseconds, and
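As a practical aside, the three parameters introduced above are handed to the kernel through sched_setattr(). A sketch of the userspace side (glibc has no wrapper, so the structure and the raw syscall are spelled out by hand; the numbers are only an example, and sched_attr times are in nanoseconds even though the text above speaks in microseconds):

    #define _GNU_SOURCE
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/sched.h>                /* SCHED_DEADLINE */

    struct sched_attr {                     /* mirrors the uapi layout */
            uint32_t size;
            uint32_t sched_policy;
            uint64_t sched_flags;
            int32_t  sched_nice;
            uint32_t sched_priority;
            uint64_t sched_runtime;
            uint64_t sched_deadline;
            uint64_t sched_period;
    };

    int main(void)
    {
            struct sched_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size           = sizeof(attr);
            attr.sched_policy   = SCHED_DEADLINE;
            attr.sched_runtime  = 10 * 1000 * 1000;     /* 10 ms of runtime ...  */
            attr.sched_deadline = 100 * 1000 * 1000;    /* ... due within 100 ms */
            attr.sched_period   = 100 * 1000 * 1000;    /* ... every 100 ms      */
            /* attr.sched_flags = SCHED_FLAG_RECLAIM would enable section 2.2 */

            /* pid 0 means the calling task */
            return syscall(SYS_sched_setattr, 0, &attr, 0) ? 1 : 0;
    }
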
@@ -113,6 +118,160 @@ CONTENTS
          remaining runtime = remaining runtime + runtime


+2.2 Bandwidth reclaiming
+------------------------
+
+ Bandwidth reclaiming for deadline tasks is based on the GRUB (Greedy
+ Reclamation of Unused Bandwidth) algorithm [15, 16, 17] and it is enabled
+ when flag SCHED_FLAG_RECLAIM is set.
+
+ The following diagram illustrates the state names for tasks handled by GRUB:
+
+                             ------------
+                 (d)        |   Active   |
+              ------------->|            |
+              |             | Contending |
+              |              ------------
+              |                A      |
+          ----------           |      |
+         |          |          |      |
+         | Inactive |          |(b)   | (a)
+         |          |          |      |
+          ----------           |      |
+              A                |      V
+              |              ------------
+              |             |   Active   |
+              --------------|     Non    |
+                 (c)        | Contending |
+                             ------------
+
+ A task can be in one of the following states:
+
+  - ActiveContending: if it is ready for execution (or executing);
+
+  - ActiveNonContending: if it just blocked and has not yet surpassed the 0-lag
+    time;
+
+  - Inactive: if it is blocked and has surpassed the 0-lag time.
+
+ State transitions:
+
+  (a) When a task blocks, it does not become immediately inactive since its
+      bandwidth cannot be immediately reclaimed without breaking the
+      real-time guarantees. It therefore enters a transitional state called
+      ActiveNonContending. The scheduler arms the "inactive timer" to fire at
+      the 0-lag time, when the task's bandwidth can be reclaimed without
+      breaking the real-time guarantees.
+
+      The 0-lag time for a task entering the ActiveNonContending state is
+      computed as
+
+                        (runtime * dl_period)
+             deadline - ---------------------
+                             dl_runtime
+
+      where runtime is the remaining runtime, while dl_runtime and dl_period
+      are the reservation parameters.
+
+  (b) If the task wakes up before the inactive timer fires, the task re-enters
+      the ActiveContending state and the "inactive timer" is canceled.
+      In addition, if the task wakes up on a different runqueue, then
+      the task's utilization must be removed from the previous runqueue's active
+      utilization and must be added to the new runqueue's active utilization.
+      In order to avoid races between a task waking up on a runqueue while the
+      "inactive timer" is running on a different CPU, the "dl_non_contending"
+      flag is used to indicate that a task is not on a runqueue but is active
+      (so, the flag is set when the task blocks and is cleared when the
+      "inactive timer" fires or when the task wakes up).
+
+  (c) When the "inactive timer" fires, the task enters the Inactive state and
+      its utilization is removed from the runqueue's active utilization.
+
+  (d) When an inactive task wakes up, it enters the ActiveContending state and
+      its utilization is added to the active utilization of the runqueue where
+      it has been enqueued.
+
+ For each runqueue, the algorithm GRUB keeps track of two different bandwidths:
+
+  - Active bandwidth (running_bw): this is the sum of the bandwidths of all
+    tasks in active state (i.e., ActiveContending or ActiveNonContending);
+
+  - Total bandwidth (this_bw): this is the sum of all tasks "belonging" to the
+    runqueue, including the tasks in Inactive state.
+
+
+ The algorithm reclaims the bandwidth of the tasks in Inactive state.
+ It does so by decrementing the runtime of the executing task Ti at a pace equal
+ to
+
+           dq = -max{ Ui, (1 - Uinact) } dt
+
+ where Uinact is the inactive utilization, computed as (this_bw - running_bw),
+ and Ui is the bandwidth of task Ti.
+
+
+ Let's now see a trivial example of two deadline tasks with runtime equal
+ to 4 and period equal to 8 (i.e., bandwidth equal to 0.5):
+
+     A            Task T1
+     |
+     |                               |
+     |                               |
+     |--------                       |----
+     |       |                       V
+     |---|---|---|---|---|---|---|---|--------->t
+     0   1   2   3   4   5   6   7   8
+
+
+     A            Task T2
+     |
+     |        |
+     |        |
+     |        ------------------------|
+     |        |                       V
+     |---|---|---|---|---|---|---|---|--------->t
+     0   1   2   3   4   5   6   7   8
+
+
+     A            running_bw
+     |
+   1 -----------------               ------
+     |               |               |
+ 0.5-                -----------------
+     |                               |
+     |---|---|---|---|---|---|---|---|--------->t
+     0   1   2   3   4   5   6   7   8
+
+
+  - Time t = 0:
+
+    Both tasks are ready for execution and therefore in ActiveContending state.
+    Suppose Task T1 is the first task to start execution.
+    Since there are no inactive tasks, its runtime is decreased as dq = -1 dt.
+
+  - Time t = 2:
+
+    Suppose that task T1 blocks.
+    Task T1 therefore enters the ActiveNonContending state. Since its remaining
+    runtime is equal to 2, its 0-lag time is equal to t = 4.
+    Task T2 starts execution, with runtime still decreased as dq = -1 dt since
+    there are no inactive tasks.
+
+  - Time t = 4:
+
+    This is the 0-lag time for Task T1. Since it hasn't woken up in the
+    meantime, it enters the Inactive state. Its bandwidth is removed from
+    running_bw.
+    Task T2 continues its execution. However, its runtime is now decreased as
+    dq = -0.5 dt because Uinact = 0.5.
+    Task T2 therefore reclaims the bandwidth unused by Task T1.
+
+  - Time t = 8:
+
+    Task T1 wakes up. It enters the ActiveContending state again, and the
+    running_bw is incremented.
+
+
 3. Scheduling Real-Time Tasks
 =============================

@@ -330,6 +489,15 @@ CONTENTS
  14 - J. Erickson, U. Devi and S. Baruah. Improved tardiness bounds for
       Global EDF. Proceedings of the 22nd Euromicro Conference on
       Real-Time Systems, 2010.
+ 15 - G. Lipari, S. Baruah, Greedy reclamation of unused bandwidth in
+      constant-bandwidth servers, 12th IEEE Euromicro Conference on Real-Time
+      Systems, 2000.
+ 16 - L. Abeni, J. Lelli, C. Scordino, L. Palopoli, Greedy CPU reclaiming for
+      SCHED DEADLINE. In Proceedings of the Real-Time Linux Workshop (RTLWS),
+      Dusseldorf, Germany, 2014.
+ 17 - L. Abeni, G. Lipari, A. Parri, Y. Sun, Multicore CPU reclaiming: parallel
+      or sequential?. In Proceedings of the 31st Annual ACM Symposium on Applied
+      Computing, 2016.


 4. Bandwidth management

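The reclaiming rules above can be checked with a few lines of arithmetic. This standalone snippet reproduces the numbers of the T1/T2 example (plain C, not kernel code; variable names are chosen to match the text):

    #include <stdio.h>

    int main(void)
    {
            /* Both reservations: runtime 4, period (and deadline) 8. */
            const double dl_runtime = 4.0, dl_period = 8.0;
            const double Ui = dl_runtime / dl_period;               /* 0.5 */

            /* (a) T1 blocks at t = 2 with 2 units of runtime left. */
            double remaining = 2.0, deadline = 8.0;
            double zero_lag = deadline - remaining * dl_period / dl_runtime;
            printf("T1 0-lag time: t = %.1f\n", zero_lag);          /* 4.0 */

            /* (c) Past the 0-lag time T1 is Inactive, so for T2:
             * Uinact = this_bw - running_bw = 1.0 - 0.5 and
             * dq = -max{ Ui, 1 - Uinact } dt = -0.5 dt             */
            double this_bw = 1.0, running_bw = 0.5;
            double Uinact = this_bw - running_bw;
            double rate = (Ui > 1.0 - Uinact) ? Ui : 1.0 - Uinact;
            printf("T2 depletion rate after t = 4: -%.1f dt\n", rate);
            return 0;
    }
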
@@ -1609,7 +1609,7 @@ Doing the same with chrt -r 5 and function-trace set.
   <idle>-0       3dN.2   14us : sched_avg_update <-__cpu_load_update
   <idle>-0       3dN.2   14us : _raw_spin_unlock <-cpu_load_update_nohz
   <idle>-0       3dN.2   14us : sub_preempt_count <-_raw_spin_unlock
-  <idle>-0       3dN.1   15us : calc_load_exit_idle <-tick_nohz_idle_exit
+  <idle>-0       3dN.1   15us : calc_load_nohz_stop <-tick_nohz_idle_exit
   <idle>-0       3dN.1   15us : touch_softlockup_watchdog <-tick_nohz_idle_exit
   <idle>-0       3dN.1   15us : hrtimer_cancel <-tick_nohz_idle_exit
   <idle>-0       3dN.1   15us : hrtimer_try_to_cancel <-hrtimer_cancel

@@ -555,8 +555,7 @@ static DEFINE_RAW_SPINLOCK(stop_lock);
  */
 static void ipi_cpu_stop(unsigned int cpu)
 {
-        if (system_state == SYSTEM_BOOTING ||
-            system_state == SYSTEM_RUNNING) {
+        if (system_state <= SYSTEM_RUNNING) {
                 raw_spin_lock(&stop_lock);
                 pr_crit("CPU%u: stopping\n", cpu);
                 dump_stack();

@@ -961,8 +961,7 @@ void smp_send_stop(void)
         cpumask_copy(&mask, cpu_online_mask);
         cpumask_clear_cpu(smp_processor_id(), &mask);

-        if (system_state == SYSTEM_BOOTING ||
-            system_state == SYSTEM_RUNNING)
+        if (system_state <= SYSTEM_RUNNING)
                 pr_crit("SMP: stopping secondary CPUs\n");
         smp_cross_call(&mask, IPI_CPU_STOP);
 }

@@ -567,8 +567,7 @@ static void stop_this_cpu(void *data)
 {
         unsigned int cpu = smp_processor_id();

-        if (system_state == SYSTEM_BOOTING ||
-            system_state == SYSTEM_RUNNING) {
+        if (system_state <= SYSTEM_RUNNING) {
                 spin_lock(&stop_lock);
                 pr_crit("CPU%u: stopping\n", cpu);
                 dump_stack();

@@ -97,7 +97,7 @@ int smp_generic_cpu_bootable(unsigned int nr)
         /* Special case - we inhibit secondary thread startup
          * during boot if the user requests it.
          */
-        if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
+        if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
                 if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
                         return 0;
                 if (smt_enabled_at_boot

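The hunks above all rely on the same property: once SYSTEM_SCHEDULING is inserted between the booting and running states, the enum values are ordered by boot progress, so the old two-way equality checks collapse into a single range check. For orientation, the ordering this series establishes looks roughly like the following (paraphrased from include/linux/kernel.h rather than quoted verbatim):

    /* Values increase as boot progresses, so
     *   system_state <  SYSTEM_RUNNING  covers BOOTING and SCHEDULING, and
     *   system_state <= SYSTEM_RUNNING  additionally covers RUNNING.      */
    extern enum system_states {
            SYSTEM_BOOTING,
            SYSTEM_SCHEDULING,      /* new: scheduler up, boot not finished */
            SYSTEM_RUNNING,
            SYSTEM_HALT,
            SYSTEM_POWER_OFF,
            SYSTEM_RESTART,
    } system_state;
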
@@ -2265,7 +2265,7 @@ static struct pmu pmu = {
 void arch_perf_update_userpage(struct perf_event *event,
                                struct perf_event_mmap_page *userpg, u64 now)
 {
-        struct cyc2ns_data *data;
+        struct cyc2ns_data data;
         u64 offset;

         userpg->cap_user_time = 0;

@@ -2277,17 +2277,17 @@ void arch_perf_update_userpage(struct perf_event *event,
         if (!using_native_sched_clock() || !sched_clock_stable())
                 return;

-        data = cyc2ns_read_begin();
+        cyc2ns_read_begin(&data);

-        offset = data->cyc2ns_offset + __sched_clock_offset;
+        offset = data.cyc2ns_offset + __sched_clock_offset;

         /*
          * Internal timekeeping for enabled/running/stopped times
          * is always in the local_clock domain.
          */
         userpg->cap_user_time = 1;
-        userpg->time_mult = data->cyc2ns_mul;
-        userpg->time_shift = data->cyc2ns_shift;
+        userpg->time_mult = data.cyc2ns_mul;
+        userpg->time_shift = data.cyc2ns_shift;
         userpg->time_offset = offset - now;

         /*

@@ -2299,7 +2299,7 @@ void arch_perf_update_userpage(struct perf_event *event,
                 userpg->time_zero = offset;
         }

-        cyc2ns_read_end(data);
+        cyc2ns_read_end();
 }

 void

@@ -29,11 +29,9 @@ struct cyc2ns_data {
         u32 cyc2ns_mul;
         u32 cyc2ns_shift;
         u64 cyc2ns_offset;
-        u32 __count;
-        /* u32 hole */
-}; /* 24 bytes -- do not grow */
+}; /* 16 bytes */

-extern struct cyc2ns_data *cyc2ns_read_begin(void);
-extern void cyc2ns_read_end(struct cyc2ns_data *);
+extern void cyc2ns_read_begin(struct cyc2ns_data *);
+extern void cyc2ns_read_end(void);

 #endif /* _ASM_X86_TIMER_H */

@@ -863,7 +863,7 @@ static void announce_cpu(int cpu, int apicid)
         if (cpu == 1)
                 printk(KERN_INFO "x86: Booting SMP configuration:\n");

-        if (system_state == SYSTEM_BOOTING) {
+        if (system_state < SYSTEM_RUNNING) {
                 if (node != current_node) {
                         if (current_node > (-1))
                                 pr_cont("\n");

@@ -51,115 +51,34 @@ static u32 art_to_tsc_denominator;
 static u64 art_to_tsc_offset;
 struct clocksource *art_related_clocksource;

-/*
- * Use a ring-buffer like data structure, where a writer advances the head by
- * writing a new data entry and a reader advances the tail when it observes a
- * new entry.
- *
- * Writers are made to wait on readers until there's space to write a new
- * entry.
- *
- * This means that we can always use an {offset, mul} pair to compute a ns
- * value that is 'roughly' in the right direction, even if we're writing a new
- * {offset, mul} pair during the clock read.
- *
- * The down-side is that we can no longer guarantee strict monotonicity anymore
- * (assuming the TSC was that to begin with), because while we compute the
- * intersection point of the two clock slopes and make sure the time is
- * continuous at the point of switching; we can no longer guarantee a reader is
- * strictly before or after the switch point.
- *
- * It does mean a reader no longer needs to disable IRQs in order to avoid
- * CPU-Freq updates messing with his times, and similarly an NMI reader will
- * no longer run the risk of hitting half-written state.
- */
-
 struct cyc2ns {
-        struct cyc2ns_data data[2];     /*  0 + 2*24 = 48 */
-        struct cyc2ns_data *head;       /* 48 + 8    = 56 */
-        struct cyc2ns_data *tail;       /* 56 + 8    = 64 */
-}; /* exactly fits one cacheline */
+        struct cyc2ns_data data[2];     /*  0 + 2*16 = 32 */
+        seqcount_t         seq;         /* 32 + 4    = 36 */
+
+}; /* fits one cacheline */

 static DEFINE_PER_CPU_ALIGNED(struct cyc2ns, cyc2ns);

-struct cyc2ns_data *cyc2ns_read_begin(void)
+void cyc2ns_read_begin(struct cyc2ns_data *data)
 {
-        struct cyc2ns_data *head;
+        int seq, idx;

-        preempt_disable();
+        preempt_disable_notrace();

-        head = this_cpu_read(cyc2ns.head);
-        /*
-         * Ensure we observe the entry when we observe the pointer to it.
-         * matches the wmb from cyc2ns_write_end().
-         */
-        smp_read_barrier_depends();
-        head->__count++;
-        barrier();
-
-        return head;
+        do {
+                seq = this_cpu_read(cyc2ns.seq.sequence);
+                idx = seq & 1;
+
+                data->cyc2ns_offset = this_cpu_read(cyc2ns.data[idx].cyc2ns_offset);
+                data->cyc2ns_mul    = this_cpu_read(cyc2ns.data[idx].cyc2ns_mul);
+                data->cyc2ns_shift  = this_cpu_read(cyc2ns.data[idx].cyc2ns_shift);
+
+        } while (unlikely(seq != this_cpu_read(cyc2ns.seq.sequence)));
 }

-void cyc2ns_read_end(struct cyc2ns_data *head)
+void cyc2ns_read_end(void)
 {
-        barrier();
-        /*
-         * If we're the outer most nested read; update the tail pointer
-         * when we're done. This notifies possible pending writers
-         * that we've observed the head pointer and that the other
-         * entry is now free.
-         */
-        if (!--head->__count) {
-                /*
-                 * x86-TSO does not reorder writes with older reads;
-                 * therefore once this write becomes visible to another
-                 * cpu, we must be finished reading the cyc2ns_data.
-                 *
-                 * matches with cyc2ns_write_begin().
-                 */
-                this_cpu_write(cyc2ns.tail, head);
-        }
-        preempt_enable();
+        preempt_enable_notrace();
 }

-/*
- * Begin writing a new @data entry for @cpu.
- *
- * Assumes some sort of write side lock; currently 'provided' by the assumption
- * that cpufreq will call its notifiers sequentially.
- */
-static struct cyc2ns_data *cyc2ns_write_begin(int cpu)
-{
-        struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
-        struct cyc2ns_data *data = c2n->data;
-
-        if (data == c2n->head)
-                data++;
-
-        /* XXX send an IPI to @cpu in order to guarantee a read? */
-
-        /*
-         * When we observe the tail write from cyc2ns_read_end(),
-         * the cpu must be done with that entry and its safe
-         * to start writing to it.
-         */
-        while (c2n->tail == data)
-                cpu_relax();
-
-        return data;
-}
-
-static void cyc2ns_write_end(int cpu, struct cyc2ns_data *data)
-{
-        struct cyc2ns *c2n = &per_cpu(cyc2ns, cpu);
-
-        /*
-         * Ensure the @data writes are visible before we publish the
-         * entry. Matches the data-depencency in cyc2ns_read_begin().
-         */
-        smp_wmb();
-
-        ACCESS_ONCE(c2n->head) = data;
-}
-
 /*

@@ -191,7 +110,6 @@ static void cyc2ns_data_init(struct cyc2ns_data *data)
         data->cyc2ns_mul = 0;
         data->cyc2ns_shift = 0;
         data->cyc2ns_offset = 0;
-        data->__count = 0;
 }

 static void cyc2ns_init(int cpu)

@@ -201,51 +119,29 @@ static void cyc2ns_init(int cpu)
         cyc2ns_data_init(&c2n->data[0]);
         cyc2ns_data_init(&c2n->data[1]);

-        c2n->head = c2n->data;
-        c2n->tail = c2n->data;
+        seqcount_init(&c2n->seq);
 }

 static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 {
-        struct cyc2ns_data *data, *tail;
+        struct cyc2ns_data data;
         unsigned long long ns;

-        /*
-         * See cyc2ns_read_*() for details; replicated in order to avoid
-         * an extra few instructions that came with the abstraction.
-         * Notable, it allows us to only do the __count and tail update
-         * dance when its actually needed.
-         */
-        preempt_disable_notrace();
-        data = this_cpu_read(cyc2ns.head);
-        tail = this_cpu_read(cyc2ns.tail);
-
-        if (likely(data == tail)) {
-                ns = data->cyc2ns_offset;
-                ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
-        } else {
-                data->__count++;
-
-                barrier();
-
-                ns = data->cyc2ns_offset;
-                ns += mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
-
-                barrier();
-
-                if (!--data->__count)
-                        this_cpu_write(cyc2ns.tail, data);
-        }
-        preempt_enable_notrace();
+        cyc2ns_read_begin(&data);
+
+        ns = data.cyc2ns_offset;
+        ns += mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
+
+        cyc2ns_read_end();

         return ns;
 }

-static void set_cyc2ns_scale(unsigned long khz, int cpu)
+static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
 {
-        unsigned long long tsc_now, ns_now;
-        struct cyc2ns_data *data;
+        unsigned long long ns_now;
+        struct cyc2ns_data data;
+        struct cyc2ns *c2n;
         unsigned long flags;

         local_irq_save(flags);

@@ -254,9 +150,6 @@ static void set_cyc2ns_scale(unsigned long khz, int cpu)
         if (!khz)
                 goto done;

-        data = cyc2ns_write_begin(cpu);
-
-        tsc_now = rdtsc();
         ns_now = cycles_2_ns(tsc_now);

         /*

@@ -264,7 +157,7 @@ static void set_cyc2ns_scale(unsigned long khz, int cpu)
          * time function is continuous; see the comment near struct
          * cyc2ns_data.
          */
-        clocks_calc_mult_shift(&data->cyc2ns_mul, &data->cyc2ns_shift, khz,
+        clocks_calc_mult_shift(&data.cyc2ns_mul, &data.cyc2ns_shift, khz,
                                NSEC_PER_MSEC, 0);

         /*

@@ -273,20 +166,26 @@ static void set_cyc2ns_scale(unsigned long khz, int cpu)
          * conversion algorithm shifting a 32-bit value (now specifies a 64-bit
          * value) - refer perf_event_mmap_page documentation in perf_event.h.
          */
-        if (data->cyc2ns_shift == 32) {
-                data->cyc2ns_shift = 31;
-                data->cyc2ns_mul >>= 1;
+        if (data.cyc2ns_shift == 32) {
+                data.cyc2ns_shift = 31;
+                data.cyc2ns_mul >>= 1;
         }

-        data->cyc2ns_offset = ns_now -
-                mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, data->cyc2ns_shift);
+        data.cyc2ns_offset = ns_now -
+                mul_u64_u32_shr(tsc_now, data.cyc2ns_mul, data.cyc2ns_shift);

-        cyc2ns_write_end(cpu, data);
+        c2n = per_cpu_ptr(&cyc2ns, cpu);
+
+        raw_write_seqcount_latch(&c2n->seq);
+        c2n->data[0] = data;
+        raw_write_seqcount_latch(&c2n->seq);
+        c2n->data[1] = data;

 done:
-        sched_clock_idle_wakeup_event(0);
+        sched_clock_idle_wakeup_event();
         local_irq_restore(flags);
 }

 /*
  * Scheduler clock - returns current time in nanosec units.
  */

@@ -374,6 +273,8 @@ static int __init tsc_setup(char *str)
                 tsc_clocksource_reliable = 1;
         if (!strncmp(str, "noirqtime", 9))
                 no_sched_irq_time = 1;
+        if (!strcmp(str, "unstable"))
+                mark_tsc_unstable("boot parameter");
         return 1;
 }

@@ -986,7 +887,6 @@ void tsc_restore_sched_clock_state(void)
 }

 #ifdef CONFIG_CPU_FREQ
-
 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
  * changes.
  *

@@ -1027,7 +927,7 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
         if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                 mark_tsc_unstable("cpufreq changes");

-        set_cyc2ns_scale(tsc_khz, freq->cpu);
+        set_cyc2ns_scale(tsc_khz, freq->cpu, rdtsc());
         }

         return 0;

@@ -1127,6 +1027,15 @@ static void tsc_cs_mark_unstable(struct clocksource *cs)
         pr_info("Marking TSC unstable due to clocksource watchdog\n");
 }

+static void tsc_cs_tick_stable(struct clocksource *cs)
+{
+        if (tsc_unstable)
+                return;
+
+        if (using_native_sched_clock())
+                sched_clock_tick_stable();
+}
+
 /*
  * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
  */

@@ -1140,6 +1049,7 @@ static struct clocksource clocksource_tsc = {
         .archdata               = { .vclock_mode = VCLOCK_TSC },
         .resume                 = tsc_resume,
         .mark_unstable          = tsc_cs_mark_unstable,
+        .tick_stable            = tsc_cs_tick_stable,
 };

 void mark_tsc_unstable(char *reason)

@@ -1255,6 +1165,7 @@ static void tsc_refine_calibration_work(struct work_struct *work)
         static int hpet;
         u64 tsc_stop, ref_stop, delta;
         unsigned long freq;
+        int cpu;

         /* Don't bother refining TSC on unstable systems */
         if (check_tsc_unstable())

@@ -1305,6 +1216,10 @@ static void tsc_refine_calibration_work(struct work_struct *work)
         /* Inform the TSC deadline clockevent devices about the recalibration */
         lapic_update_tsc_freq();

+        /* Update the sched_clock() rate to match the clocksource one */
+        for_each_possible_cpu(cpu)
+                set_cyc2ns_scale(tsc_khz, cpu, tsc_stop);
+
 out:
         if (boot_cpu_has(X86_FEATURE_ART))
                 art_related_clocksource = &clocksource_tsc;

@@ -1350,7 +1265,7 @@ device_initcall(init_tsc_clocksource);

 void __init tsc_init(void)
 {
-        u64 lpj;
+        u64 lpj, cyc;
         int cpu;

         if (!boot_cpu_has(X86_FEATURE_TSC)) {

@@ -1390,9 +1305,10 @@ void __init tsc_init(void)
          * speed as the bootup CPU. (cpufreq notifiers will fix this
          * up if their speed diverges)
          */
+        cyc = rdtsc();
         for_each_possible_cpu(cpu) {
                 cyc2ns_init(cpu);
-                set_cyc2ns_scale(tsc_khz, cpu);
+                set_cyc2ns_scale(tsc_khz, cpu, cyc);
         }

         if (tsc_disabled > 0)

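The new cyc2ns_read_begin()/cyc2ns_read_end() pair above is essentially the reader side of a seqcount latch: two copies of the data are published alternately, and the low bit of the sequence count tells readers which copy is currently stable. A compact sketch of that pattern with the generic seqlock helpers of this kernel generation (illustrative only; struct snap, latch_publish() and latch_read() are invented names, and the real code open-codes the reader with this_cpu_read() as shown in the hunk):

    #include <linux/seqlock.h>

    struct snap { u64 offset; u32 mul, shift; };

    static struct {
            seqcount_t      seq;            /* seqcount_init() at setup time */
            struct snap     data[2];
    } latch;

    /* Writer, as set_cyc2ns_scale() now does per CPU: bump the sequence,
     * rewrite one slot, bump again, rewrite the other slot.              */
    static void latch_publish(const struct snap *new)
    {
            raw_write_seqcount_latch(&latch.seq);
            latch.data[0] = *new;
            raw_write_seqcount_latch(&latch.seq);
            latch.data[1] = *new;
    }

    /* Reader, the loop inside the new cyc2ns_read_begin(). */
    static void latch_read(struct snap *out)
    {
            unsigned int seq, idx;

            do {
                    seq = raw_read_seqcount_latch(&latch.seq);
                    idx = seq & 1;
                    *out = latch.data[idx];
            } while (read_seqcount_retry(&latch.seq, seq));
    }
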
@@ -456,12 +456,13 @@ static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp)
  */
 static inline unsigned long long cycles_2_ns(unsigned long long cyc)
 {
-        struct cyc2ns_data *data = cyc2ns_read_begin();
+        struct cyc2ns_data data;
         unsigned long long ns;

-        ns = mul_u64_u32_shr(cyc, data->cyc2ns_mul, data->cyc2ns_shift);
+        cyc2ns_read_begin(&data);
+        ns = mul_u64_u32_shr(cyc, data.cyc2ns_mul, data.cyc2ns_shift);
+        cyc2ns_read_end();

-        cyc2ns_read_end(data);
         return ns;
 }

@@ -470,12 +471,13 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
  */
 static inline unsigned long long ns_2_cycles(unsigned long long ns)
 {
-        struct cyc2ns_data *data = cyc2ns_read_begin();
+        struct cyc2ns_data data;
         unsigned long long cyc;

-        cyc = (ns << data->cyc2ns_shift) / data->cyc2ns_mul;
+        cyc2ns_read_begin(&data);
+        cyc = (ns << data.cyc2ns_shift) / data.cyc2ns_mul;
+        cyc2ns_read_end();

-        cyc2ns_read_end(data);
         return cyc;
 }

@@ -941,14 +941,14 @@ static bool reorder_tags_to_front(struct list_head *list)
         return first != NULL;
 }

-static int blk_mq_dispatch_wake(wait_queue_t *wait, unsigned mode, int flags,
+static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
                                 void *key)
 {
         struct blk_mq_hw_ctx *hctx;

         hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);

-        list_del(&wait->task_list);
+        list_del(&wait->entry);
         clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
         blk_mq_run_hw_queue(hctx, true);
         return 1;

@@ -503,7 +503,7 @@ static inline unsigned int get_limit(struct rq_wb *rwb, unsigned long rw)
 }

 static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
-                             wait_queue_t *wait, unsigned long rw)
+                             wait_queue_entry_t *wait, unsigned long rw)
 {
         /*
          * inc it here even if disabled, since we'll dec it at completion.

@@ -520,7 +520,7 @@ static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
          * in line to be woken up, wait for our turn.
          */
         if (waitqueue_active(&rqw->wait) &&
-            rqw->wait.task_list.next != &wait->task_list)
+            rqw->wait.head.next != &wait->entry)
                 return false;

         return atomic_inc_below(&rqw->inflight, get_limit(rwb, rw));

@@ -99,7 +99,7 @@ struct kyber_hctx_data {
         struct list_head rqs[KYBER_NUM_DOMAINS];
         unsigned int cur_domain;
         unsigned int batching;
-        wait_queue_t domain_wait[KYBER_NUM_DOMAINS];
+        wait_queue_entry_t domain_wait[KYBER_NUM_DOMAINS];
         atomic_t wait_index[KYBER_NUM_DOMAINS];
 };

@@ -385,7 +385,7 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)

         for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                 INIT_LIST_HEAD(&khd->rqs[i]);
-                INIT_LIST_HEAD(&khd->domain_wait[i].task_list);
+                INIT_LIST_HEAD(&khd->domain_wait[i].entry);
                 atomic_set(&khd->wait_index[i], 0);
         }

@@ -503,12 +503,12 @@ static void kyber_flush_busy_ctxs(struct kyber_hctx_data *khd,
         }
 }

-static int kyber_domain_wake(wait_queue_t *wait, unsigned mode, int flags,
+static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
                              void *key)
 {
         struct blk_mq_hw_ctx *hctx = READ_ONCE(wait->private);

-        list_del_init(&wait->task_list);
+        list_del_init(&wait->entry);
         blk_mq_run_hw_queue(hctx, true);
         return 1;
 }

@@ -519,7 +519,7 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
 {
         unsigned int sched_domain = khd->cur_domain;
         struct sbitmap_queue *domain_tokens = &kqd->domain_tokens[sched_domain];
-        wait_queue_t *wait = &khd->domain_wait[sched_domain];
+        wait_queue_entry_t *wait = &khd->domain_wait[sched_domain];
         struct sbq_wait_state *ws;
         int nr;

@@ -532,7 +532,7 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
          * run when one becomes available. Note that this is serialized on
          * khd->lock, but we still need to be careful about the waker.
          */
-        if (list_empty_careful(&wait->task_list)) {
+        if (list_empty_careful(&wait->entry)) {
                 init_waitqueue_func_entry(wait, kyber_domain_wake);
                 wait->private = hctx;
                 ws = sbq_wait_ptr(domain_tokens,

@@ -730,9 +730,9 @@ static int kyber_##name##_waiting_show(void *data, struct seq_file *m) \
 { \
         struct blk_mq_hw_ctx *hctx = data; \
         struct kyber_hctx_data *khd = hctx->sched_data; \
-        wait_queue_t *wait = &khd->domain_wait[domain]; \
+        wait_queue_entry_t *wait = &khd->domain_wait[domain]; \
 \
-        seq_printf(m, "%d\n", !list_empty_careful(&wait->task_list)); \
+        seq_printf(m, "%d\n", !list_empty_careful(&wait->entry)); \
         return 0; \
 }
 KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)

@ -523,7 +523,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
|
|||
struct acpi_pci_root *root;
|
||||
acpi_handle handle = device->handle;
|
||||
int no_aspm = 0;
|
||||
bool hotadd = system_state != SYSTEM_BOOTING;
|
||||
bool hotadd = system_state == SYSTEM_RUNNING;
|
||||
|
||||
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
|
||||
if (!root)
|
||||
|
|
|
@ -377,7 +377,7 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
|
|||
if (!pfn_valid_within(pfn))
|
||||
return -1;
|
||||
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
|
||||
if (system_state == SYSTEM_BOOTING)
|
||||
if (system_state < SYSTEM_RUNNING)
|
||||
return early_pfn_to_nid(pfn);
|
||||
#endif
|
||||
page = pfn_to_page(pfn);
|
||||
|
|
|
@ -602,7 +602,7 @@ static int btmrvl_service_main_thread(void *data)
|
|||
struct btmrvl_thread *thread = data;
|
||||
struct btmrvl_private *priv = thread->priv;
|
||||
struct btmrvl_adapter *adapter = priv->adapter;
|
||||
wait_queue_t wait;
|
||||
wait_queue_entry_t wait;
|
||||
struct sk_buff *skb;
|
||||
ulong flags;
|
||||
|
||||
|
|
|
@ -821,7 +821,7 @@ static ssize_t ipmi_read(struct file *file,
|
|||
loff_t *ppos)
|
||||
{
|
||||
int rv = 0;
|
||||
wait_queue_t wait;
|
||||
wait_queue_entry_t wait;
|
||||
|
||||
if (count <= 0)
|
||||
return 0;
|
||||
|
|
|
@ -226,7 +226,7 @@ static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy)
|
|||
* We don't support CPU hotplug. Don't unmap after the system
|
||||
* has already made it to a running state.
|
||||
*/
|
||||
if (system_state != SYSTEM_BOOTING)
|
||||
if (system_state >= SYSTEM_RUNNING)
|
||||
return 0;
|
||||
|
||||
if (sdcasr_mapbase)
|
||||
|
|
|
@ -220,6 +220,7 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
|
|||
entered_state = target_state->enter(dev, drv, index);
|
||||
start_critical_timings();
|
||||
|
||||
sched_clock_idle_wakeup_event();
|
||||
time_end = ns_to_ktime(local_clock());
|
||||
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
|
||||
|
||||
|
|
|
@ -123,7 +123,7 @@ struct drm_i915_gem_request {
|
|||
* It is used by the driver to then queue the request for execution.
|
||||
*/
|
||||
struct i915_sw_fence submit;
|
||||
wait_queue_t submitq;
|
||||
wait_queue_entry_t submitq;
|
||||
wait_queue_head_t execute;
|
||||
|
||||
/* A list of everyone we wait upon, and everyone who waits upon us.
|
||||
|
|
|
@ -152,7 +152,7 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
|
|||
struct list_head *continuation)
|
||||
{
|
||||
wait_queue_head_t *x = &fence->wait;
|
||||
wait_queue_t *pos, *next;
|
||||
wait_queue_entry_t *pos, *next;
|
||||
unsigned long flags;
|
||||
|
||||
debug_fence_deactivate(fence);
|
||||
|
@ -160,31 +160,30 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
|
|||
|
||||
/*
|
||||
* To prevent unbounded recursion as we traverse the graph of
|
||||
* i915_sw_fences, we move the task_list from this, the next ready
|
||||
* fence, to the tail of the original fence's task_list
|
||||
* i915_sw_fences, we move the entry list from this, the next ready
|
||||
* fence, to the tail of the original fence's entry list
|
||||
* (and so added to the list to be woken).
|
||||
*/
|
||||
|
||||
spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
|
||||
if (continuation) {
|
||||
list_for_each_entry_safe(pos, next, &x->task_list, task_list) {
|
||||
list_for_each_entry_safe(pos, next, &x->head, entry) {
|
||||
if (pos->func == autoremove_wake_function)
|
||||
pos->func(pos, TASK_NORMAL, 0, continuation);
|
||||
else
|
||||
list_move_tail(&pos->task_list, continuation);
|
||||
list_move_tail(&pos->entry, continuation);
|
||||
}
|
||||
} else {
|
||||
LIST_HEAD(extra);
|
||||
|
||||
do {
|
||||
list_for_each_entry_safe(pos, next,
|
||||
&x->task_list, task_list)
|
||||
list_for_each_entry_safe(pos, next, &x->head, entry)
|
||||
pos->func(pos, TASK_NORMAL, 0, &extra);
|
||||
|
||||
if (list_empty(&extra))
|
||||
break;
|
||||
|
||||
list_splice_tail_init(&extra, &x->task_list);
|
||||
list_splice_tail_init(&extra, &x->head);
|
||||
} while (1);
|
||||
}
|
||||
spin_unlock_irqrestore(&x->lock, flags);
|
||||
|
@ -254,9 +253,9 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence)
|
|||
__i915_sw_fence_commit(fence);
|
||||
}
|
||||
|
||||
static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *key)
|
||||
static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key)
|
||||
{
|
||||
list_del(&wq->task_list);
|
||||
list_del(&wq->entry);
|
||||
__i915_sw_fence_complete(wq->private, key);
|
||||
i915_sw_fence_put(wq->private);
|
||||
if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
|
||||
|
@ -267,7 +266,7 @@ static int i915_sw_fence_wake(wait_queue_t *wq, unsigned mode, int flags, void *
|
|||
static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
|
||||
const struct i915_sw_fence * const signaler)
|
||||
{
|
||||
wait_queue_t *wq;
|
||||
wait_queue_entry_t *wq;
|
||||
|
||||
if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
|
||||
return false;
|
||||
|
@ -275,7 +274,7 @@ static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
|
|||
if (fence == signaler)
|
||||
return true;
|
||||
|
||||
list_for_each_entry(wq, &fence->wait.task_list, task_list) {
|
||||
list_for_each_entry(wq, &fence->wait.head, entry) {
|
||||
if (wq->func != i915_sw_fence_wake)
|
||||
continue;
|
||||
|
||||
|
@ -288,12 +287,12 @@ static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
|
|||
|
||||
static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
|
||||
{
|
||||
wait_queue_t *wq;
|
||||
wait_queue_entry_t *wq;
|
||||
|
||||
if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
|
||||
return;
|
||||
|
||||
list_for_each_entry(wq, &fence->wait.task_list, task_list) {
|
||||
list_for_each_entry(wq, &fence->wait.head, entry) {
|
||||
if (wq->func != i915_sw_fence_wake)
|
||||
continue;
|
||||
|
||||
|
@ -320,7 +319,7 @@ static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
|
|||
|
||||
static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
|
||||
struct i915_sw_fence *signaler,
|
||||
wait_queue_t *wq, gfp_t gfp)
|
||||
wait_queue_entry_t *wq, gfp_t gfp)
|
||||
{
|
||||
unsigned long flags;
|
||||
int pending;
|
||||
|
@ -350,7 +349,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
|
|||
pending |= I915_SW_FENCE_FLAG_ALLOC;
|
||||
}
|
||||
|
||||
INIT_LIST_HEAD(&wq->task_list);
|
||||
INIT_LIST_HEAD(&wq->entry);
|
||||
wq->flags = pending;
|
||||
wq->func = i915_sw_fence_wake;
|
||||
wq->private = i915_sw_fence_get(fence);
|
||||
|
@ -359,7 +358,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
|
|||
|
||||
spin_lock_irqsave(&signaler->wait.lock, flags);
|
||||
if (likely(!i915_sw_fence_done(signaler))) {
|
||||
__add_wait_queue_tail(&signaler->wait, wq);
|
||||
__add_wait_queue_entry_tail(&signaler->wait, wq);
|
||||
pending = 1;
|
||||
} else {
|
||||
i915_sw_fence_wake(wq, 0, 0, NULL);
|
||||
|
@ -372,7 +371,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
|
|||
|
||||
int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
|
||||
struct i915_sw_fence *signaler,
|
||||
wait_queue_t *wq)
|
||||
wait_queue_entry_t *wq)
|
||||
{
|
||||
return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0);
|
||||
}
|
||||
|
|
|
@ -66,7 +66,7 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence);
|
|||
|
||||
int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
|
||||
struct i915_sw_fence *after,
|
||||
wait_queue_t *wq);
|
||||
wait_queue_entry_t *wq);
|
||||
int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
|
||||
struct i915_sw_fence *after,
|
||||
gfp_t gfp);
|
||||
|
|
|
@ -375,7 +375,7 @@ struct radeon_fence {
|
|||
unsigned ring;
|
||||
bool is_vm_update;
|
||||
|
||||
wait_queue_t fence_wake;
|
||||
wait_queue_entry_t fence_wake;
|
||||
};
|
||||
|
||||
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
|
||||
|
|
|
@ -158,7 +158,7 @@ int radeon_fence_emit(struct radeon_device *rdev,
|
|||
* for the fence locking itself, so unlocked variants are used for
|
||||
* fence_signal, and remove_wait_queue.
|
||||
*/
|
||||
static int radeon_fence_check_signaled(wait_queue_t *wait, unsigned mode, int flags, void *key)
|
||||
static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode, int flags, void *key)
|
||||
{
|
||||
struct radeon_fence *fence;
|
||||
u64 seq;
|
||||
|
|
|
@ -417,7 +417,7 @@ int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
|
|||
{
|
||||
struct vga_device *vgadev, *conflict;
|
||||
unsigned long flags;
|
||||
wait_queue_t wait;
|
||||
wait_queue_entry_t wait;
|
||||
int rc = 0;
|
||||
|
||||
vga_check_first_use();
|
||||
|
|
|
@ -1939,7 +1939,7 @@ static int i40iw_virtchnl_receive(struct i40e_info *ldev,
|
|||
bool i40iw_vf_clear_to_send(struct i40iw_sc_dev *dev)
|
||||
{
|
||||
struct i40iw_device *iwdev;
|
||||
wait_queue_t wait;
|
||||
wait_queue_entry_t wait;
|
||||
|
||||
iwdev = dev->back_dev;
|
||||
|
||||
|
|
|
@ -4315,7 +4315,7 @@ int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
|
|||
struct acpi_dmar_atsr *atsr;
|
||||
struct dmar_atsr_unit *atsru;
|
||||
|
||||
if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
|
||||
if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
|
||||
return 0;
|
||||
|
||||
atsr = container_of(hdr, struct acpi_dmar_atsr, header);
|
||||
|
@ -4565,7 +4565,7 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
|
|||
struct acpi_dmar_atsr *atsr;
|
||||
struct acpi_dmar_reserved_memory *rmrr;
|
||||
|
||||
if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
|
||||
if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
|
||||
return 0;
|
||||
|
||||
list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
|
||||
|
|
|
@ -103,7 +103,7 @@ static bool of_iommu_driver_present(struct device_node *np)
|
|||
* it never will be. We don't want to defer indefinitely, nor attempt
|
||||
* to dereference __iommu_of_table after it's been freed.
|
||||
*/
|
||||
if (system_state > SYSTEM_BOOTING)
|
||||
if (system_state >= SYSTEM_RUNNING)
|
||||
return false;
|
||||
|
||||
return of_match_node(&__iommu_of_table, np);
|
||||
|
|
|
@ -207,7 +207,7 @@ void bkey_put(struct cache_set *c, struct bkey *k);
|
|||
|
||||
struct btree_op {
|
||||
/* for waiting on btree reserve in btree_split() */
|
||||
wait_queue_t wait;
|
||||
wait_queue_entry_t wait;
|
||||
|
||||
/* Btree level at which we start taking write locks */
|
||||
short lock;
|
||||
|
|
|
@ -144,7 +144,7 @@ static inline int
|
|||
sleep_cond(wait_queue_head_t *wait_queue, int *condition)
|
||||
{
|
||||
int errno = 0;
|
||||
wait_queue_t we;
|
||||
wait_queue_entry_t we;
|
||||
|
||||
init_waitqueue_entry(&we, current);
|
||||
add_wait_queue(wait_queue, &we);
|
||||
|
@ -171,7 +171,7 @@ sleep_timeout_cond(wait_queue_head_t *wait_queue,
|
|||
int *condition,
|
||||
int timeout)
|
||||
{
|
||||
wait_queue_t we;
|
||||
wait_queue_entry_t we;
|
||||
|
||||
init_waitqueue_entry(&we, current);
|
||||
add_wait_queue(wait_queue, &we);
|
||||
|
|
|
@ -3066,7 +3066,7 @@ static int airo_thread(void *data) {
|
|||
if (ai->jobs) {
|
||||
locked = down_interruptible(&ai->sem);
|
||||
} else {
|
||||
wait_queue_t wait;
|
||||
wait_queue_entry_t wait;
|
||||
|
||||
init_waitqueue_entry(&wait, current);
|
||||
add_wait_queue(&ai->thr_wait, &wait);
|
||||
|
|
|
@ -2544,7 +2544,7 @@ static int prism2_ioctl_priv_prism2_param(struct net_device *dev,
|
|||
ret = -EINVAL;
|
||||
}
|
||||
if (local->iw_mode == IW_MODE_MASTER) {
|
||||
wait_queue_t __wait;
|
||||
wait_queue_entry_t __wait;
|
||||
init_waitqueue_entry(&__wait, current);
|
||||
add_wait_queue(&local->hostscan_wq, &__wait);
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
|
|
|
@ -453,7 +453,7 @@ static int lbs_thread(void *data)
|
|||
{
|
||||
struct net_device *dev = data;
|
||||
struct lbs_private *priv = dev->ml_priv;
|
||||
wait_queue_t wait;
|
||||
wait_queue_entry_t wait;
|
||||
|
||||
lbs_deb_enter(LBS_DEB_THREAD);
|
||||
|
||||
|
|
|
@ -709,7 +709,7 @@ static irqreturn_t dryice_irq(int irq, void *dev_id)
|
|||
/*If the write wait queue is empty then there is no pending
|
||||
operations. It means the interrupt is for DryIce -Security.
|
||||
IRQ must be returned as none.*/
|
||||
if (list_empty_careful(&imxdi->write_wait.task_list))
|
||||
if (list_empty_careful(&imxdi->write_wait.head))
|
||||
return rc;
|
||||
|
||||
/* DSR_WCF clears itself on DSR read */
|
||||
|
|
|
@ -48,7 +48,7 @@
|
|||
#include <linux/wait.h>
|
||||
typedef wait_queue_head_t adpt_wait_queue_head_t;
|
||||
#define ADPT_DECLARE_WAIT_QUEUE_HEAD(wait) DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait)
|
||||
typedef wait_queue_t adpt_wait_queue_t;
|
||||
typedef wait_queue_entry_t adpt_wait_queue_entry_t;
|
||||
|
||||
/*
|
||||
* message structures
|
||||
|
|
|
@ -301,13 +301,13 @@ static uint32_t ips_statupd_copperhead_memio(ips_ha_t *);
|
|||
static uint32_t ips_statupd_morpheus(ips_ha_t *);
|
||||
static ips_scb_t *ips_getscb(ips_ha_t *);
|
||||
static void ips_putq_scb_head(ips_scb_queue_t *, ips_scb_t *);
|
||||
static void ips_putq_wait_tail(ips_wait_queue_t *, struct scsi_cmnd *);
|
||||
static void ips_putq_wait_tail(ips_wait_queue_entry_t *, struct scsi_cmnd *);
|
||||
static void ips_putq_copp_tail(ips_copp_queue_t *,
|
||||
ips_copp_wait_item_t *);
|
||||
static ips_scb_t *ips_removeq_scb_head(ips_scb_queue_t *);
|
||||
static ips_scb_t *ips_removeq_scb(ips_scb_queue_t *, ips_scb_t *);
|
||||
static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *);
|
||||
static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_t *,
|
||||
static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *);
|
||||
static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *,
|
||||
struct scsi_cmnd *);
|
||||
static ips_copp_wait_item_t *ips_removeq_copp(ips_copp_queue_t *,
|
||||
ips_copp_wait_item_t *);
|
||||
|
@ -2871,7 +2871,7 @@ ips_removeq_scb(ips_scb_queue_t * queue, ips_scb_t * item)
|
|||
/* ASSUMED to be called from within the HA lock */
|
||||
/* */
|
||||
/****************************************************************************/
|
||||
static void ips_putq_wait_tail(ips_wait_queue_t *queue, struct scsi_cmnd *item)
|
||||
static void ips_putq_wait_tail(ips_wait_queue_entry_t *queue, struct scsi_cmnd *item)
|
||||
{
|
||||
METHOD_TRACE("ips_putq_wait_tail", 1);
|
||||
|
||||
|
@ -2902,7 +2902,7 @@ static void ips_putq_wait_tail(ips_wait_queue_t *queue, struct scsi_cmnd *item)
|
|||
/* ASSUMED to be called from within the HA lock */
|
||||
/* */
|
||||
/****************************************************************************/
|
||||
static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *queue)
|
||||
static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_entry_t *queue)
|
||||
{
|
||||
struct scsi_cmnd *item;
|
||||
|
||||
|
@ -2936,7 +2936,7 @@ static struct scsi_cmnd *ips_removeq_wait_head(ips_wait_queue_t *queue)
|
|||
/* ASSUMED to be called from within the HA lock */
|
||||
/* */
|
||||
/****************************************************************************/
|
||||
static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_t *queue,
|
||||
static struct scsi_cmnd *ips_removeq_wait(ips_wait_queue_entry_t *queue,
|
||||
struct scsi_cmnd *item)
|
||||
{
|
||||
struct scsi_cmnd *p;
|
||||
|
|
|
@ -989,7 +989,7 @@ typedef struct ips_wait_queue {
|
|||
struct scsi_cmnd *head;
|
||||
struct scsi_cmnd *tail;
|
||||
int count;
|
||||
} ips_wait_queue_t;
|
||||
} ips_wait_queue_entry_t;
|
||||
|
||||
typedef struct ips_copp_wait_item {
|
||||
struct scsi_cmnd *scsi_cmd;
|
||||
|
@ -1035,7 +1035,7 @@ typedef struct ips_ha {
|
|||
ips_stat_t sp; /* Status packer pointer */
|
||||
struct ips_scb *scbs; /* Array of all CCBS */
|
||||
struct ips_scb *scb_freelist; /* SCB free list */
|
||||
ips_wait_queue_t scb_waitlist; /* Pending SCB list */
|
||||
ips_wait_queue_entry_t scb_waitlist; /* Pending SCB list */
|
||||
ips_copp_queue_t copp_waitlist; /* Pending PT list */
|
||||
ips_scb_queue_t scb_activelist; /* Active SCB list */
|
||||
IPS_IO_CMD *dummy; /* dummy command */
|
||||
|
|
|
@ -3267,7 +3267,7 @@ int
|
|||
kiblnd_connd(void *arg)
|
||||
{
|
||||
spinlock_t *lock = &kiblnd_data.kib_connd_lock;
|
||||
wait_queue_t wait;
|
||||
wait_queue_entry_t wait;
|
||||
unsigned long flags;
|
||||
struct kib_conn *conn;
|
||||
int timeout;
|
||||
|
@ -3521,7 +3521,7 @@ kiblnd_scheduler(void *arg)
|
|||
long id = (long)arg;
|
||||
struct kib_sched_info *sched;
|
||||
struct kib_conn *conn;
|
||||
wait_queue_t wait;
|
||||
wait_queue_entry_t wait;
|
||||
unsigned long flags;
|
||||
struct ib_wc wc;
|
||||
int did_something;
|
||||
|
@ -3656,7 +3656,7 @@ kiblnd_failover_thread(void *arg)
|
|||
{
|
||||
rwlock_t *glock = &kiblnd_data.kib_global_lock;
|
||||
struct kib_dev *dev;
|
||||
wait_queue_t wait;
|
||||
wait_queue_entry_t wait;
|
||||
unsigned long flags;
|
||||
int rc;
|
||||
|
||||
|
|
|
@ -2166,7 +2166,7 @@ ksocknal_connd(void *arg)
|
|||
{
|
||||
spinlock_t *connd_lock = &ksocknal_data.ksnd_connd_lock;
|
||||
struct ksock_connreq *cr;
|
||||
wait_queue_t wait;
|
||||
wait_queue_entry_t wait;
|
||||
int nloops = 0;
|
||||
int cons_retry = 0;
|
||||
|
||||
|
@ -2554,7 +2554,7 @@ ksocknal_check_peer_timeouts(int idx)
|
|||
int
|
||||
ksocknal_reaper(void *arg)
|
||||
{
|
||||
wait_queue_t wait;
|
||||
wait_queue_entry_t wait;
|
||||
struct ksock_conn *conn;
|
||||
struct ksock_sched *sched;
|
||||
struct list_head enomem_conns;
|
||||
|
|
|
@ -361,7 +361,7 @@ static int libcfs_debug_dumplog_thread(void *arg)
|
|||
|
||||
void libcfs_debug_dumplog(void)
|
||||
{
|
||||
wait_queue_t wait;
|
||||
wait_queue_entry_t wait;
|
||||
struct task_struct *dumper;
|
||||
|
||||
/* we're being careful to ensure that the kernel thread is
|
||||
|
|
|
@ -990,7 +990,7 @@ static int tracefiled(void *arg)
|
|||
complete(&tctl->tctl_start);
|
||||
|
||||
while (1) {
|
||||
wait_queue_t __wait;
|
||||
wait_queue_entry_t __wait;
|
||||
|
||||
pc.pc_want_daemon_pages = 0;
|
||||
collect_pages(&pc);
|
||||
|
|
|
@ -312,7 +312,7 @@ __must_hold(&the_lnet.ln_eq_wait_lock)
|
|||
{
|
||||
int tms = *timeout_ms;
|
||||
int wait;
|
||||
wait_queue_t wl;
|
||||
wait_queue_entry_t wl;
|
||||
unsigned long now;
|
||||
|
||||
if (!tms)
|
||||
|
|
|
@ -516,7 +516,7 @@ lnet_sock_listen(struct socket **sockp, __u32 local_ip, int local_port,
|
|||
int
|
||||
lnet_sock_accept(struct socket **newsockp, struct socket *sock)
|
||||
{
|
||||
wait_queue_t wait;
|
||||
wait_queue_entry_t wait;
|
||||
struct socket *newsock;
|
||||
int rc;
|
||||
|
||||
|
|
|
@ -192,7 +192,7 @@ static int seq_client_alloc_seq(const struct lu_env *env,
|
|||
}
|
||||
|
||||
static int seq_fid_alloc_prep(struct lu_client_seq *seq,
|
||||
wait_queue_t *link)
|
||||
wait_queue_entry_t *link)
|
||||
{
|
||||
if (seq->lcs_update) {
|
||||
add_wait_queue(&seq->lcs_waitq, link);
|
||||
|
@ -223,7 +223,7 @@ static void seq_fid_alloc_fini(struct lu_client_seq *seq)
|
|||
int seq_client_alloc_fid(const struct lu_env *env,
|
||||
struct lu_client_seq *seq, struct lu_fid *fid)
|
||||
{
|
||||
wait_queue_t link;
|
||||
wait_queue_entry_t link;
|
||||
int rc;
|
||||
|
||||
LASSERT(seq);
|
||||
|
@ -290,7 +290,7 @@ EXPORT_SYMBOL(seq_client_alloc_fid);
|
|||
*/
|
||||
void seq_client_flush(struct lu_client_seq *seq)
|
||||
{
|
||||
wait_queue_t link;
|
||||
wait_queue_entry_t link;
|
||||
|
||||
LASSERT(seq);
|
||||
init_waitqueue_entry(&link, current);
|
||||
|
|
|
@@ -201,7 +201,7 @@ struct l_wait_info {
                            sigmask(SIGALRM))
 
 /**
- * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
+ * wait_queue_entry_t of Linux (version < 2.6.34) is a FIFO list for exclusively
  * waiting threads, which is not always desirable because all threads will
  * be waken up again and again, even user only needs a few of them to be
  * active most time. This is not good for performance because cache can
@@ -228,7 +228,7 @@ struct l_wait_info {
  */
 #define __l_wait_event(wq, condition, info, ret, l_add_wait)           \
 do {                                                                   \
-        wait_queue_t __wait;                                           \
+        wait_queue_entry_t __wait;                                     \
         long __timeout = info->lwi_timeout;                            \
         sigset_t __blocked;                                            \
         int __allow_intr = info->lwi_allow_intr;                       \
@@ -207,7 +207,7 @@ int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
 static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
 {
         struct lu_object_header *header = obj->co_lu.lo_header;
-        wait_queue_t waiter;
+        wait_queue_entry_t waiter;
 
         if (unlikely(atomic_read(&header->loh_ref) != 1)) {
                 struct lu_site *site = obj->co_lu.lo_dev->ld_site;
@@ -370,7 +370,7 @@ struct lov_thread_info {
         struct ost_lvb          lti_lvb;
         struct cl_2queue        lti_cl2q;
         struct cl_page_list     lti_plist;
-        wait_queue_t            lti_waiter;
+        wait_queue_entry_t      lti_waiter;
         struct cl_attr          lti_attr;
 };
 
@@ -371,7 +371,7 @@ static void lov_subobject_kill(const struct lu_env *env, struct lov_object *lov,
         struct lov_layout_raid0 *r0;
         struct lu_site          *site;
         struct lu_site_bkt_data *bkt;
-        wait_queue_t            *waiter;
+        wait_queue_entry_t      *waiter;
 
         r0 = &lov->u.raid0;
         LASSERT(r0->lo_sub[idx] == los);
@@ -556,7 +556,7 @@ EXPORT_SYMBOL(lu_object_print);
 static struct lu_object *htable_lookup(struct lu_site *s,
                                        struct cfs_hash_bd *bd,
                                        const struct lu_fid *f,
-                                       wait_queue_t *waiter,
+                                       wait_queue_entry_t *waiter,
                                        __u64 *version)
 {
         struct lu_site_bkt_data *bkt;
@@ -670,7 +670,7 @@ static struct lu_object *lu_object_find_try(const struct lu_env *env,
                                             struct lu_device *dev,
                                             const struct lu_fid *f,
                                             const struct lu_object_conf *conf,
-                                            wait_queue_t *waiter)
+                                            wait_queue_entry_t *waiter)
 {
         struct lu_object *o;
         struct lu_object *shadow;
@@ -750,7 +750,7 @@ struct lu_object *lu_object_find_at(const struct lu_env *env,
 {
         struct lu_site_bkt_data *bkt;
         struct lu_object *obj;
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
 
         while (1) {
                 obj = lu_object_find_try(env, dev, f, conf, &wait);
@@ -184,7 +184,7 @@ static void hdlcdev_exit(struct slgt_info *info);
 struct cond_wait {
         struct cond_wait *next;
         wait_queue_head_t q;
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
         unsigned int data;
 };
 static void init_cond_wait(struct cond_wait *w, unsigned int data);
@@ -43,7 +43,7 @@ static void virqfd_deactivate(struct virqfd *virqfd)
         queue_work(vfio_irqfd_cleanup_wq, &virqfd->shutdown);
 }
 
-static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int virqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
         struct virqfd *virqfd = container_of(wait, struct virqfd, wait);
         unsigned long flags = (unsigned long)key;
@@ -165,7 +165,7 @@ static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
         add_wait_queue(wqh, &poll->wait);
 }
 
-static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
+static int vhost_poll_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync,
                              void *key)
 {
         struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);
@@ -31,7 +31,7 @@ struct vhost_work {
 struct vhost_poll {
         poll_table              table;
         wait_queue_head_t       *wqh;
-        wait_queue_t            wait;
+        wait_queue_entry_t      wait;
         struct vhost_work       work;
         unsigned long           mask;
         struct vhost_dev        *dev;
@@ -190,6 +190,7 @@ static void do_poweroff(void)
 {
         switch (system_state) {
         case SYSTEM_BOOTING:
+        case SYSTEM_SCHEDULING:
                 orderly_poweroff(true);
                 break;
         case SYSTEM_RUNNING:
@@ -83,7 +83,7 @@ struct autofs_info {
 struct autofs_wait_queue {
         wait_queue_head_t queue;
         struct autofs_wait_queue *next;
-        autofs_wqt_t wait_queue_token;
+        autofs_wqt_t wait_queue_entry_token;
         /* We use the following to see what we are waiting for */
         struct qstr name;
         u32 dev;
@@ -104,7 +104,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
         size_t pktsz;
 
         pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n",
-                 (unsigned long) wq->wait_queue_token,
+                 (unsigned long) wq->wait_queue_entry_token,
                  wq->name.len, wq->name.name, type);
 
         memset(&pkt, 0, sizeof(pkt)); /* For security reasons */
@@ -120,7 +120,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
 
                 pktsz = sizeof(*mp);
 
-                mp->wait_queue_token = wq->wait_queue_token;
+                mp->wait_queue_entry_token = wq->wait_queue_entry_token;
                 mp->len = wq->name.len;
                 memcpy(mp->name, wq->name.name, wq->name.len);
                 mp->name[wq->name.len] = '\0';
@@ -133,7 +133,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
 
                 pktsz = sizeof(*ep);
 
-                ep->wait_queue_token = wq->wait_queue_token;
+                ep->wait_queue_entry_token = wq->wait_queue_entry_token;
                 ep->len = wq->name.len;
                 memcpy(ep->name, wq->name.name, wq->name.len);
                 ep->name[wq->name.len] = '\0';
@@ -153,7 +153,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
 
                 pktsz = sizeof(*packet);
 
-                packet->wait_queue_token = wq->wait_queue_token;
+                packet->wait_queue_entry_token = wq->wait_queue_entry_token;
                 packet->len = wq->name.len;
                 memcpy(packet->name, wq->name.name, wq->name.len);
                 packet->name[wq->name.len] = '\0';
@@ -428,7 +428,7 @@ int autofs4_wait(struct autofs_sb_info *sbi,
                         return -ENOMEM;
                 }
 
-                wq->wait_queue_token = autofs4_next_wait_queue;
+                wq->wait_queue_entry_token = autofs4_next_wait_queue;
                 if (++autofs4_next_wait_queue == 0)
                         autofs4_next_wait_queue = 1;
                 wq->next = sbi->queues;
@@ -461,7 +461,7 @@ int autofs4_wait(struct autofs_sb_info *sbi,
                 }
 
                 pr_debug("new wait id = 0x%08lx, name = %.*s, nfy=%d\n",
-                         (unsigned long) wq->wait_queue_token, wq->name.len,
+                         (unsigned long) wq->wait_queue_entry_token, wq->name.len,
                          wq->name.name, notify);
 
                 /*
@@ -471,7 +471,7 @@ int autofs4_wait(struct autofs_sb_info *sbi,
         } else {
                 wq->wait_ctr++;
                 pr_debug("existing wait id = 0x%08lx, name = %.*s, nfy=%d\n",
-                         (unsigned long) wq->wait_queue_token, wq->name.len,
+                         (unsigned long) wq->wait_queue_entry_token, wq->name.len,
                          wq->name.name, notify);
                 mutex_unlock(&sbi->wq_mutex);
                 kfree(qstr.name);
@@ -550,13 +550,13 @@ int autofs4_wait(struct autofs_sb_info *sbi,
 }
 
 
-int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_token, int status)
+int autofs4_wait_release(struct autofs_sb_info *sbi, autofs_wqt_t wait_queue_entry_token, int status)
 {
         struct autofs_wait_queue *wq, **wql;
 
         mutex_lock(&sbi->wq_mutex);
         for (wql = &sbi->queues; (wq = *wql) != NULL; wql = &wq->next) {
-                if (wq->wait_queue_token == wait_queue_token)
+                if (wq->wait_queue_entry_token == wait_queue_entry_token)
                         break;
         }
 
@@ -18,7 +18,7 @@
 
 #include <linux/fscache-cache.h>
 #include <linux/timer.h>
-#include <linux/wait.h>
+#include <linux/wait_bit.h>
 #include <linux/cred.h>
 #include <linux/workqueue.h>
 #include <linux/security.h>
@@ -97,7 +97,7 @@ struct cachefiles_cache {
  * backing file read tracking
  */
 struct cachefiles_one_read {
-        wait_queue_t monitor;           /* link into monitored waitqueue */
+        wait_queue_entry_t monitor;     /* link into monitored waitqueue */
         struct page *back_page;         /* backing file page we're waiting for */
         struct page *netfs_page;        /* netfs page we're going to fill */
         struct fscache_retrieval *op;   /* retrieval op covering this */
@@ -204,7 +204,7 @@ static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
         wait_queue_head_t *wq;
 
         signed long timeout = 60 * HZ;
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
         bool requeue;
 
         /* if the object we're waiting for is queued for processing,
@@ -21,7 +21,7 @@
  * - we use this to detect read completion of backing pages
  * - the caller holds the waitqueue lock
  */
-static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
+static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
                                   int sync, void *_key)
 {
         struct cachefiles_one_read *monitor =
@@ -48,7 +48,7 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
         }
 
         /* remove from the waitqueue */
-        list_del(&wait->task_list);
+        list_del(&wait->entry);
 
         /* move onto the action list and queue for FS-Cache thread pool */
         ASSERT(monitor->op);
@@ -24,6 +24,7 @@
 #include <linux/pagemap.h>
 #include <linux/freezer.h>
 #include <linux/sched/signal.h>
+#include <linux/wait_bit.h>
 
 #include <asm/div64.h>
 #include "cifsfs.h"
fs/dax.c
@@ -84,7 +84,7 @@ struct exceptional_entry_key {
 };
 
 struct wait_exceptional_entry_queue {
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
         struct exceptional_entry_key key;
 };
 
@@ -108,7 +108,7 @@ static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
         return wait_table + hash;
 }
 
-static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
+static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
                                        int sync, void *keyp)
 {
         struct exceptional_entry_key *key = keyp;
@@ -191,7 +191,7 @@ static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
  * This is used to atomically remove a wait queue entry from the eventfd wait
  * queue head, and read/reset the counter value.
  */
-int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
+int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
                                   __u64 *cnt)
 {
         unsigned long flags;
@@ -244,7 +244,7 @@ struct eppoll_entry {
          * Wait queue item that will be linked to the target file wait
          * queue head.
          */
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
 
         /* The wait queue head that linked the "wait" wait queue item */
         wait_queue_head_t *whead;
@@ -347,13 +347,13 @@ static inline int ep_is_linked(struct list_head *p)
         return !list_empty(p);
 }
 
-static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p)
+static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_entry_t *p)
 {
         return container_of(p, struct eppoll_entry, wait);
 }
 
 /* Get the "struct epitem" from a wait queue pointer */
-static inline struct epitem *ep_item_from_wait(wait_queue_t *p)
+static inline struct epitem *ep_item_from_wait(wait_queue_entry_t *p)
 {
         return container_of(p, struct eppoll_entry, wait)->base;
 }
 
@@ -1078,7 +1078,7 @@ static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)
  * mechanism. It is called by the stored file descriptors when they
  * have events to report.
  */
-static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
         int pwake = 0;
         unsigned long flags;
@@ -1094,7 +1094,7 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k
                  * can't use __remove_wait_queue(). whead->lock is held by
                  * the caller.
                  */
-                list_del_init(&wait->task_list);
+                list_del_init(&wait->entry);
         }
 
         spin_lock_irqsave(&ep->lock, flags);
@@ -1699,7 +1699,7 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
         int res = 0, eavail, timed_out = 0;
         unsigned long flags;
         u64 slack = 0;
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
         ktime_t expires, *to = NULL;
 
         if (timeout > 0) {
@@ -34,7 +34,7 @@ void pin_insert(struct fs_pin *pin, struct vfsmount *m)
 
 void pin_kill(struct fs_pin *p)
 {
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
 
         if (!p) {
                 rcu_read_unlock();
@@ -61,7 +61,7 @@ void pin_kill(struct fs_pin *p)
                 rcu_read_unlock();
                 schedule();
                 rcu_read_lock();
-                if (likely(list_empty(&wait.task_list)))
+                if (likely(list_empty(&wait.entry)))
                         break;
                 /* OK, we know p couldn't have been freed yet */
                 spin_lock_irq(&p->wait.lock);
@@ -1892,11 +1892,11 @@ static void __wait_on_freeing_inode(struct inode *inode)
         wait_queue_head_t *wq;
         DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
         wq = bit_waitqueue(&inode->i_state, __I_NEW);
-        prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+        prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
         spin_unlock(&inode->i_lock);
         spin_unlock(&inode_hash_lock);
         schedule();
-        finish_wait(wq, &wait.wait);
+        finish_wait(wq, &wait.wq_entry);
         spin_lock(&inode_hash_lock);
 }
 
@@ -2039,11 +2039,11 @@ static void __inode_dio_wait(struct inode *inode)
         DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
 
         do {
-                prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
+                prepare_to_wait(wq, &q.wq_entry, TASK_UNINTERRUPTIBLE);
                 if (atomic_read(&inode->i_dio_count))
                         schedule();
         } while (atomic_read(&inode->i_dio_count));
-        finish_wait(wq, &q.wait);
+        finish_wait(wq, &q.wq_entry);
 }
 
 /**
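/*
 * Editorial aside (not part of the merged diff): with the wait-bit entry
 * renamed to .wq_entry, an open-coded bit wait looks like the sketch below.
 * The flags word, bit number and function name are illustrative only.
 */
#include <linux/wait_bit.h>
#include <linux/sched.h>

static void example_wait_for_bit_clear(unsigned long *word, int bit)
{
        wait_queue_head_t *wq = bit_waitqueue(word, bit);
        DEFINE_WAIT_BIT(wait, word, bit);

        do {
                prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
                if (test_bit(bit, word))
                        schedule();
        } while (test_bit(bit, word));
        finish_wait(wq, &wait.wq_entry);
}

/* The waker side is unchanged: clear_bit(bit, word); wake_up_bit(word, bit); */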
@@ -2579,10 +2579,10 @@ void jbd2_journal_release_jbd_inode(journal_t *journal,
                 wait_queue_head_t *wq;
                 DEFINE_WAIT_BIT(wait, &jinode->i_flags, __JI_COMMIT_RUNNING);
                 wq = bit_waitqueue(&jinode->i_flags, __JI_COMMIT_RUNNING);
-                prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+                prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
                 spin_unlock(&journal->j_list_lock);
                 schedule();
-                finish_wait(wq, &wait.wait);
+                finish_wait(wq, &wait.wq_entry);
                 goto restart;
         }
 
@@ -7,6 +7,7 @@
 #include <linux/security.h>
 #include <linux/crc32.h>
 #include <linux/nfs_page.h>
+#include <linux/wait_bit.h>
 
 #define NFS_MS_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS)
 
@@ -6373,7 +6373,7 @@ struct nfs4_lock_waiter {
 };
 
 static int
-nfs4_wake_lock_waiter(wait_queue_t *wait, unsigned int mode, int flags, void *key)
+nfs4_wake_lock_waiter(wait_queue_entry_t *wait, unsigned int mode, int flags, void *key)
 {
         int ret;
         struct cb_notify_lock_args *cbnl = key;
@@ -6416,7 +6416,7 @@ nfs4_retry_setlk(struct nfs4_state *state, int cmd, struct file_lock *request)
                                         .inode = state->inode,
                                         .owner = &owner,
                                         .notified = false };
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
 
         /* Don't bother with waitqueue if we don't expect a callback */
         if (!test_bit(NFS_STATE_MAY_NOTIFY_LOCK, &state->flags))
@@ -2161,7 +2161,7 @@ void nilfs_flush_segment(struct super_block *sb, ino_t ino)
 }
 
 struct nilfs_segctor_wait_request {
-        wait_queue_t            wq;
+        wait_queue_entry_t      wq;
         __u32                   seq;
         int                     err;
         atomic_t                done;
@@ -2206,8 +2206,7 @@ static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
         unsigned long flags;
 
         spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
-        list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.task_list,
-                                 wq.task_list) {
+        list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
                 if (!atomic_read(&wrq->done) &&
                     nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
                         wrq->err = err;
@@ -46,8 +46,8 @@ static void run_down(struct slot_map *m)
         spin_lock(&m->q.lock);
         if (m->c != -1) {
                 for (;;) {
-                        if (likely(list_empty(&wait.task_list)))
-                                __add_wait_queue_tail(&m->q, &wait);
+                        if (likely(list_empty(&wait.entry)))
+                                __add_wait_queue_entry_tail(&m->q, &wait);
                         set_current_state(TASK_UNINTERRUPTIBLE);
 
                         if (m->c == -1)
@@ -84,8 +84,8 @@ static int wait_for_free(struct slot_map *m)
 
         do {
                 long n = left, t;
-                if (likely(list_empty(&wait.task_list)))
-                        __add_wait_queue_tail_exclusive(&m->q, &wait);
+                if (likely(list_empty(&wait.entry)))
+                        __add_wait_queue_entry_tail_exclusive(&m->q, &wait);
                 set_current_state(TASK_INTERRUPTIBLE);
 
                 if (m->c > 0)
@@ -108,8 +108,8 @@ static int wait_for_free(struct slot_map *m)
                         left = -EINTR;
         } while (left > 0);
 
-        if (!list_empty(&wait.task_list))
-                list_del(&wait.task_list);
+        if (!list_empty(&wait.entry))
+                list_del(&wait.entry);
         else if (left <= 0 && waitqueue_active(&m->q))
                 __wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL);
         __set_current_state(TASK_RUNNING);
@@ -2956,7 +2956,7 @@ void reiserfs_wait_on_write_block(struct super_block *s)
 
 static void queue_log_writer(struct super_block *s)
 {
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
         struct reiserfs_journal *journal = SB_JOURNAL(s);
         set_bit(J_WRITERS_QUEUED, &journal->j_state);
 
@@ -180,7 +180,7 @@ static struct poll_table_entry *poll_get_entry(struct poll_wqueues *p)
         return table->entry++;
 }
 
-static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int __pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
         struct poll_wqueues *pwq = wait->private;
         DECLARE_WAITQUEUE(dummy_wait, pwq->polling_task);
@@ -206,7 +206,7 @@ static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
         return default_wake_function(&dummy_wait, mode, sync, key);
 }
 
-static int pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
+static int pollwake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
         struct poll_table_entry *entry;
 
@@ -43,7 +43,7 @@ void signalfd_cleanup(struct sighand_struct *sighand)
         if (likely(!waitqueue_active(wqh)))
                 return;
 
-        /* wait_queue_t->func(POLLFREE) should do remove_wait_queue() */
+        /* wait_queue_entry_t->func(POLLFREE) should do remove_wait_queue() */
         wake_up_poll(wqh, POLLHUP | POLLFREE);
 }
 
@@ -81,7 +81,7 @@ struct userfaultfd_unmap_ctx {
 
 struct userfaultfd_wait_queue {
         struct uffd_msg msg;
-        wait_queue_t wq;
+        wait_queue_entry_t wq;
         struct userfaultfd_ctx *ctx;
         bool waken;
 };
@@ -91,7 +91,7 @@ struct userfaultfd_wake_range {
         unsigned long len;
 };
 
-static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
+static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
                                      int wake_flags, void *key)
 {
         struct userfaultfd_wake_range *range = key;
@@ -129,7 +129,7 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
          * wouldn't be enough, the smp_mb__before_spinlock is
          * enough to avoid an explicit smp_mb() here.
          */
-        list_del_init(&wq->task_list);
+        list_del_init(&wq->entry);
 out:
         return ret;
 }
@@ -522,13 +522,13 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
          * and it's fine not to block on the spinlock. The uwq on this
          * kernel stack can be released after the list_del_init.
          */
-        if (!list_empty_careful(&uwq.wq.task_list)) {
+        if (!list_empty_careful(&uwq.wq.entry)) {
                 spin_lock(&ctx->fault_pending_wqh.lock);
                 /*
                  * No need of list_del_init(), the uwq on the stack
                  * will be freed shortly anyway.
                  */
-                list_del(&uwq.wq.task_list);
+                list_del(&uwq.wq.entry);
                 spin_unlock(&ctx->fault_pending_wqh.lock);
         }
 
@@ -860,7 +860,7 @@ static int userfaultfd_release(struct inode *inode, struct file *file)
 static inline struct userfaultfd_wait_queue *find_userfault_in(
                 wait_queue_head_t *wqh)
 {
-        wait_queue_t *wq;
+        wait_queue_entry_t *wq;
         struct userfaultfd_wait_queue *uwq;
 
         VM_BUG_ON(!spin_is_locked(&wqh->lock));
@@ -869,7 +869,7 @@ static inline struct userfaultfd_wait_queue *find_userfault_in(
         if (!waitqueue_active(wqh))
                 goto out;
         /* walk in reverse to provide FIFO behavior to read userfaults */
-        wq = list_last_entry(&wqh->task_list, typeof(*wq), task_list);
+        wq = list_last_entry(&wqh->head, typeof(*wq), entry);
         uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
 out:
         return uwq;
@@ -1003,14 +1003,14 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
                          * changes __remove_wait_queue() to use
                          * list_del_init() in turn breaking the
                          * !list_empty_careful() check in
-                         * handle_userfault(). The uwq->wq.task_list
+                         * handle_userfault(). The uwq->wq.head list
                          * must never be empty at any time during the
                          * refile, or the waitqueue could disappear
                          * from under us. The "wait_queue_head_t"
                          * parameter of __remove_wait_queue() is unused
                          * anyway.
                          */
-                        list_del(&uwq->wq.task_list);
+                        list_del(&uwq->wq.entry);
                         __add_wait_queue(&ctx->fault_wqh, &uwq->wq);
 
                         write_seqcount_end(&ctx->refile_seq);
@@ -1032,7 +1032,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
                         fork_nctx = (struct userfaultfd_ctx *)
                                 (unsigned long)
                                 uwq->msg.arg.reserved.reserved1;
-                        list_move(&uwq->wq.task_list, &fork_event);
+                        list_move(&uwq->wq.entry, &fork_event);
                         spin_unlock(&ctx->event_wqh.lock);
                         ret = 0;
                         break;
@@ -1069,8 +1069,8 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
                 if (!list_empty(&fork_event)) {
                         uwq = list_first_entry(&fork_event,
                                                typeof(*uwq),
-                                               wq.task_list);
-                        list_del(&uwq->wq.task_list);
+                                               wq.entry);
+                        list_del(&uwq->wq.entry);
                         __add_wait_queue(&ctx->event_wqh, &uwq->wq);
                         userfaultfd_event_complete(ctx, uwq);
                 }
@@ -1747,17 +1747,17 @@ static long userfaultfd_ioctl(struct file *file, unsigned cmd,
 static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
 {
         struct userfaultfd_ctx *ctx = f->private_data;
-        wait_queue_t *wq;
+        wait_queue_entry_t *wq;
         struct userfaultfd_wait_queue *uwq;
         unsigned long pending = 0, total = 0;
 
         spin_lock(&ctx->fault_pending_wqh.lock);
-        list_for_each_entry(wq, &ctx->fault_pending_wqh.task_list, task_list) {
+        list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
                 uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
                 pending++;
                 total++;
         }
-        list_for_each_entry(wq, &ctx->fault_wqh.task_list, task_list) {
+        list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
                 uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
                 total++;
         }
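/*
 * Editorial aside (not part of the merged diff): besides the type rename, the
 * list linkage moved from wq_head->task_list / wq_entry->task_list to
 * wq_head->head / wq_entry->entry, as the userfaultfd hunks above show. A
 * sketch of a custom waiter using the new names; all identifiers here are
 * illustrative.
 */
#include <linux/wait.h>
#include <linux/sched.h>

struct example_waiter {
        wait_queue_entry_t      wq;     /* was: wait_queue_t wq; */
        struct task_struct      *task;
        bool                    woken;
};

static int example_wake_function(wait_queue_entry_t *wq, unsigned mode,
                                 int sync, void *key)
{
        struct example_waiter *w = container_of(wq, struct example_waiter, wq);

        w->woken = true;
        list_del_init(&wq->entry);      /* was: &wq->task_list */
        wake_up_process(w->task);
        return 1;
}

static unsigned int example_count_waiters(wait_queue_head_t *wqh)
{
        wait_queue_entry_t *pos;
        unsigned int n = 0;
        unsigned long flags;

        spin_lock_irqsave(&wqh->lock, flags);
        list_for_each_entry(pos, &wqh->head, entry)     /* was: ->task_list, task_list */
                n++;
        spin_unlock_irqrestore(&wqh->lock, flags);
        return n;
}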
@@ -269,12 +269,12 @@ xfs_inew_wait(
         DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
 
         do {
-                prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+                prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
                 if (!xfs_iflags_test(ip, XFS_INEW))
                         break;
                 schedule();
         } while (true);
-        finish_wait(wq, &wait.wait);
+        finish_wait(wq, &wait.wq_entry);
 }
 
 /*
@@ -622,12 +622,12 @@ __xfs_iflock(
         DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
 
         do {
-                prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+                prepare_to_wait_exclusive(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
                 if (xfs_isiflocked(ip))
                         io_schedule();
         } while (!xfs_iflock_nowait(ip));
 
-        finish_wait(wq, &wait.wait);
+        finish_wait(wq, &wait.wq_entry);
 }
 
 STATIC uint
@@ -2486,11 +2486,11 @@ __xfs_iunpin_wait(
         xfs_iunpin(ip);
 
         do {
-                prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+                prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
                 if (xfs_ipincount(ip))
                         io_schedule();
         } while (xfs_ipincount(ip));
-        finish_wait(wq, &wait.wait);
+        finish_wait(wq, &wait.wq_entry);
 }
 
 void
@@ -33,7 +33,7 @@ struct blk_mq_hw_ctx {
         struct blk_mq_ctx       **ctxs;
         unsigned int            nr_ctx;
 
-        wait_queue_t            dispatch_wait;
+        wait_queue_entry_t      dispatch_wait;
         atomic_t                wait_index;
 
         struct blk_mq_tags      *tags;
@@ -96,6 +96,7 @@ struct clocksource {
         void (*suspend)(struct clocksource *cs);
         void (*resume)(struct clocksource *cs);
         void (*mark_unstable)(struct clocksource *cs);
+        void (*tick_stable)(struct clocksource *cs);
 
         /* private: */
 #ifdef CONFIG_CLOCKSOURCE_WATCHDOG
@@ -236,6 +236,23 @@ unsigned int cpumask_local_spread(unsigned int i, int node);
                 (cpu) = cpumask_next_zero((cpu), (mask)),       \
                 (cpu) < nr_cpu_ids;)
 
+extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+
+/**
+ * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask poiter
+ * @start: the start location
+ *
+ * The implementation does not assume any bit in @mask is set (including @start).
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_wrap(cpu, mask, start)                                     \
+        for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false);      \
+             (cpu) < nr_cpumask_bits;                                           \
+             (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
+
 /**
  * for_each_cpu_and - iterate over every cpu in both masks
  * @cpu: the (optionally unsigned) integer iterator
@@ -276,6 +293,12 @@ static inline void cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
         set_bit(cpumask_check(cpu), cpumask_bits(dstp));
 }
 
+static inline void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
+{
+        __set_bit(cpumask_check(cpu), cpumask_bits(dstp));
+}
+
+
 /**
  * cpumask_clear_cpu - clear a cpu in a cpumask
  * @cpu: cpu number (< nr_cpu_ids)
@@ -286,6 +309,11 @@ static inline void cpumask_clear_cpu(int cpu, struct cpumask *dstp)
         clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
 }
 
+static inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
+{
+        __clear_bit(cpumask_check(cpu), cpumask_bits(dstp));
+}
+
 /**
  * cpumask_test_cpu - test for a cpu in a cpumask
  * @cpu: cpu number (< nr_cpu_ids)
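/*
 * Editorial aside (not part of the merged diff): a sketch of how the helpers
 * added above could be used together; the mask names and the "previous CPU"
 * argument are illustrative.
 */
#include <linux/cpumask.h>

static int example_pick_next_cpu(const struct cpumask *candidates,
                                 int prev_cpu, struct cpumask *chosen)
{
        int cpu;

        /* Walk every candidate CPU once, starting just after the last pick. */
        for_each_cpu_wrap(cpu, candidates, prev_cpu + 1) {
                if (!cpu_online(cpu))
                        continue;
                /* Non-atomic variant is fine if @chosen is not shared. */
                __cpumask_set_cpu(cpu, chosen);
                return cpu;
        }
        return -1;
}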
@@ -37,7 +37,7 @@ struct eventfd_ctx *eventfd_ctx_fdget(int fd);
 struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
 __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
 ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt);
-int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
+int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
                                   __u64 *cnt);
 
 #else /* CONFIG_EVENTFD */
@@ -73,7 +73,7 @@ static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait,
 }
 
 static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
-                                                wait_queue_t *wait, __u64 *cnt)
+                                                wait_queue_entry_t *wait, __u64 *cnt)
 {
         return -ENOSYS;
 }
@@ -2,7 +2,7 @@
 #define _LINUX_FS_H
 
 #include <linux/linkage.h>
-#include <linux/wait.h>
+#include <linux/wait_bit.h>
 #include <linux/kdev_t.h>
 #include <linux/dcache.h>
 #include <linux/path.h>
@@ -490,9 +490,13 @@ extern int root_mountflags;
 
 extern bool early_boot_irqs_disabled;
 
-/* Values used for system_state */
+/*
+ * Values used for system_state. Ordering of the states must not be changed
+ * as code checks for <, <=, >, >= STATE.
+ */
 extern enum system_states {
         SYSTEM_BOOTING,
+        SYSTEM_SCHEDULING,
         SYSTEM_RUNNING,
         SYSTEM_HALT,
         SYSTEM_POWER_OFF,
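/*
 * Editorial aside (not part of the merged diff): because the enum above is
 * explicitly ordered, early-boot conditions can be written as comparisons.
 * SYSTEM_SCHEDULING sits between SYSTEM_BOOTING and SYSTEM_RUNNING, so a
 * check like the sketch below still treats that window as "not yet running".
 */
#include <linux/kernel.h>

static bool example_before_full_boot(void)
{
        /* true in SYSTEM_BOOTING and SYSTEM_SCHEDULING */
        return system_state < SYSTEM_RUNNING;
}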
@@ -46,7 +46,7 @@ struct kvm_kernel_irqfd_resampler {
 struct kvm_kernel_irqfd {
         /* Used for MSI fast-path */
         struct kvm *kvm;
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
         /* Update side is protected by irqfds.lock */
         struct kvm_kernel_irq_routing_entry irq_entry;
         seqcount_t irq_entry_sc;
@@ -109,6 +109,25 @@ static inline void init_llist_head(struct llist_head *list)
 #define llist_for_each(pos, node)                       \
         for ((pos) = (node); pos; (pos) = (pos)->next)
 
+/**
+ * llist_for_each_safe - iterate over some deleted entries of a lock-less list
+ *                       safe against removal of list entry
+ * @pos:        the &struct llist_node to use as a loop cursor
+ * @n:          another &struct llist_node to use as temporary storage
+ * @node:       the first entry of deleted list entries
+ *
+ * In general, some entries of the lock-less list can be traversed
+ * safely only after being deleted from list, so start with an entry
+ * instead of list head.
+ *
+ * If being used on entries deleted from lock-less list directly, the
+ * traverse order is from the newest to the oldest added entry.  If
+ * you want to traverse from the oldest to the newest, you must
+ * reverse the order by yourself before traversing.
+ */
+#define llist_for_each_safe(pos, n, node)                       \
+        for ((pos) = (node); (pos) && ((n) = (pos)->next, true); (pos) = (n))
+
 /**
  * llist_for_each_entry - iterate over some deleted entries of lock-less list of given type
  * @pos:        the type * to use as a loop cursor.
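/*
 * Editorial aside (not part of the merged diff): typical use of the
 * llist_for_each_safe() iterator added above, draining a lock-less list and
 * freeing each node; the structure and function names are illustrative.
 */
#include <linux/llist.h>
#include <linux/slab.h>

struct example_job {
        struct llist_node       node;
        int                     payload;
};

static void example_drain_jobs(struct llist_head *list)
{
        struct llist_node *first, *pos, *n;

        first = llist_del_all(list);            /* detach everything at once */
        llist_for_each_safe(pos, n, first) {
                struct example_job *job = llist_entry(pos, struct example_job, node);

                /* the _safe form lets us free the node while iterating */
                kfree(job);
        }
}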
@@ -524,7 +524,7 @@ void page_endio(struct page *page, bool is_write, int err);
 /*
  * Add an arbitrary waiter to a page's wait queue
  */
-extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);
+extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);
 
 /*
  * Fault everything in given userspace address range in.
@@ -75,7 +75,7 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
 struct poll_table_entry {
         struct file *filp;
         unsigned long key;
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
         wait_queue_head_t *wait_address;
 };
 
@@ -421,7 +421,8 @@ struct sched_dl_entity {
         u64                     dl_runtime;     /* Maximum runtime for each instance    */
         u64                     dl_deadline;    /* Relative deadline of each instance   */
         u64                     dl_period;      /* Separation of two instances (period) */
-        u64                     dl_bw;          /* dl_runtime / dl_deadline             */
+        u64                     dl_bw;          /* dl_runtime / dl_period               */
+        u64                     dl_density;     /* dl_runtime / dl_deadline             */
 
         /*
          * Actual scheduling parameters. Initialized with the values above,
@@ -445,16 +446,33 @@ struct sched_dl_entity {
          *
          * @dl_yielded tells if task gave up the CPU before consuming
          * all its available runtime during the last job.
+         *
+         * @dl_non_contending tells if the task is inactive while still
+         * contributing to the active utilization. In other words, it
+         * indicates if the inactive timer has been armed and its handler
+         * has not been executed yet. This flag is useful to avoid race
+         * conditions between the inactive timer handler and the wakeup
+         * code.
          */
         int                     dl_throttled;
         int                     dl_boosted;
         int                     dl_yielded;
+        int                     dl_non_contending;
 
         /*
          * Bandwidth enforcement timer. Each -deadline task has its
          * own bandwidth to be enforced, thus we need one timer per task.
          */
         struct hrtimer          dl_timer;
+
+        /*
+         * Inactive timer, responsible for decreasing the active utilization
+         * at the "0-lag time". When a -deadline task blocks, it contributes
+         * to GRUB's active utilization until the "0-lag time", hence a
+         * timer is needed to decrease the active utilization at the correct
+         * time.
+         */
+        struct hrtimer          inactive_timer;
 };
 
 union rcu_special {
@@ -1096,8 +1114,6 @@ static inline struct pid *task_session(struct task_struct *task)
  * current.
  * task_xid_nr_ns()  : id seen from the ns specified;
  *
- * set_task_vxid()   : assigns a virtual id to a task;
- *
  * see also pid_nr() etc in include/linux/pid.h
  */
 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
@@ -23,10 +23,6 @@ extern u64 sched_clock_cpu(int cpu);
 extern void sched_clock_init(void);
 
 #ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-static inline void sched_clock_init_late(void)
-{
-}
-
 static inline void sched_clock_tick(void)
 {
 }
@@ -39,7 +35,7 @@ static inline void sched_clock_idle_sleep_event(void)
 {
 }
 
-static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
+static inline void sched_clock_idle_wakeup_event(void)
 {
 }
 
@@ -53,7 +49,6 @@ static inline u64 local_clock(void)
         return sched_clock();
 }
 #else
-extern void sched_clock_init_late(void);
 extern int sched_clock_stable(void);
 extern void clear_sched_clock_stable(void);
 
@@ -63,10 +58,10 @@ extern void clear_sched_clock_stable(void);
  */
 extern u64 __sched_clock_offset;
 
-
 extern void sched_clock_tick(void);
+extern void sched_clock_tick_stable(void);
 extern void sched_clock_idle_sleep_event(void);
-extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+extern void sched_clock_idle_wakeup_event(void);
 
 /*
  * As outlined in clock.c, provides a fast, high resolution, nanosecond
@@ -23,11 +23,11 @@ static inline void set_cpu_sd_state_idle(void) { }
 #endif
 
 #ifdef CONFIG_NO_HZ_COMMON
-void calc_load_enter_idle(void);
-void calc_load_exit_idle(void);
+void calc_load_nohz_start(void);
+void calc_load_nohz_stop(void);
 #else
-static inline void calc_load_enter_idle(void) { }
-static inline void calc_load_exit_idle(void) { }
+static inline void calc_load_nohz_start(void) { }
+static inline void calc_load_nohz_stop(void) { }
 #endif /* CONFIG_NO_HZ_COMMON */
 
 #if defined(CONFIG_NO_HZ_COMMON) && defined(CONFIG_SMP)
@@ -95,8 +95,6 @@ static inline void put_task_struct(struct task_struct *t)
 }
 
 struct task_struct *task_rcu_dereference(struct task_struct **ptask);
-struct task_struct *try_get_task_struct(struct task_struct **ptask);
-
 
 #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
 extern int arch_task_struct_size __read_mostly;
@@ -13,7 +13,7 @@
 #include <linux/ktime.h>
 #include <linux/sunrpc/types.h>
 #include <linux/spinlock.h>
-#include <linux/wait.h>
+#include <linux/wait_bit.h>
 #include <linux/workqueue.h>
 #include <linux/sunrpc/xdr.h>
 
@@ -183,7 +183,7 @@ struct virqfd {
         void (*thread)(void *, void *);
         void *data;
         struct work_struct inject;
-        wait_queue_t wait;
+        wait_queue_entry_t wait;
         poll_table pt;
         struct work_struct shutdown;
         struct virqfd **pvirqfd;
(File diff suppressed because it is too large.)

include/linux/wait_bit.h (new file, 261 lines)
@@ -0,0 +1,261 @@
#ifndef _LINUX_WAIT_BIT_H
#define _LINUX_WAIT_BIT_H

/*
 * Linux wait-bit related types and methods:
 */
#include <linux/wait.h>

struct wait_bit_key {
        void                    *flags;
        int                     bit_nr;
#define WAIT_ATOMIC_T_BIT_NR    -1
        unsigned long           timeout;
};

struct wait_bit_queue_entry {
        struct wait_bit_key     key;
        struct wait_queue_entry wq_entry;
};

#define __WAIT_BIT_KEY_INITIALIZER(word, bit)                                   \
        { .flags = word, .bit_nr = bit, }

#define __WAIT_ATOMIC_T_KEY_INITIALIZER(p)                                      \
        { .flags = p, .bit_nr = WAIT_ATOMIC_T_BIT_NR, }

typedef int wait_bit_action_f(struct wait_bit_key *key, int mode);
void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit);
int __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
int __wait_on_bit_lock(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_entry, wait_bit_action_f *action, unsigned int mode);
void wake_up_bit(void *word, int bit);
void wake_up_atomic_t(atomic_t *p);
int out_of_line_wait_on_bit(void *word, int, wait_bit_action_f *action, unsigned int mode);
int out_of_line_wait_on_bit_timeout(void *word, int, wait_bit_action_f *action, unsigned int mode, unsigned long timeout);
int out_of_line_wait_on_bit_lock(void *word, int, wait_bit_action_f *action, unsigned int mode);
int out_of_line_wait_on_atomic_t(atomic_t *p, int (*)(atomic_t *), unsigned int mode);
struct wait_queue_head *bit_waitqueue(void *word, int bit);
extern void __init wait_bit_init(void);

int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync, void *key);

#define DEFINE_WAIT_BIT(name, word, bit)                                        \
        struct wait_bit_queue_entry name = {                                    \
                .key = __WAIT_BIT_KEY_INITIALIZER(word, bit),                   \
                .wq_entry = {                                                   \
                        .private        = current,                              \
                        .func           = wake_bit_function,                    \
                        .entry          =                                       \
                                LIST_HEAD_INIT((name).wq_entry.entry),          \
                },                                                              \
        }

extern int bit_wait(struct wait_bit_key *key, int bit);
extern int bit_wait_io(struct wait_bit_key *key, int bit);
extern int bit_wait_timeout(struct wait_bit_key *key, int bit);
extern int bit_wait_io_timeout(struct wait_bit_key *key, int bit);

/**
 * wait_on_bit - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit.
 * For instance, if one were to have waiters on a bitflag, one would
 * call wait_on_bit() in threads waiting for the bit to clear.
 * One uses wait_on_bit() where one is waiting for the bit to clear,
 * but has no intention of setting it.
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit(unsigned long *word, int bit, unsigned mode)
{
        might_sleep();
        if (!test_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit,
                                       bit_wait,
                                       mode);
}

/**
 * wait_on_bit_io - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared. This is similar to wait_on_bit(), but calls
 * io_schedule() instead of schedule() for the actual waiting.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
{
        might_sleep();
        if (!test_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit,
                                       bit_wait_io,
                                       mode);
}

/**
 * wait_on_bit_timeout - wait for a bit to be cleared or a timeout elapses
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 * @timeout: timeout, in jiffies
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared. This is similar to wait_on_bit(), except also takes a
 * timeout parameter.
 *
 * Returned value will be zero if the bit was cleared before the
 * @timeout elapsed, or non-zero if the @timeout elapsed or process
 * received a signal and the mode permitted wakeup on that signal.
 */
static inline int
wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
                    unsigned long timeout)
{
        might_sleep();
        if (!test_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit_timeout(word, bit,
                                               bit_wait_timeout,
                                               mode, timeout);
}

/**
 * wait_on_bit_action - wait for a bit to be cleared
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared, and allow the waiting action to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returned value will be zero if the bit was cleared, or non-zero
 * if the process received a signal and the mode permitted wakeup
 * on that signal.
 */
static inline int
wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
                   unsigned mode)
{
        might_sleep();
        if (!test_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit(word, bit, action, mode);
}

/**
 * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * There is a standard hashed waitqueue table for generic use. This
 * is the part of the hashtable's accessor API that waits on a bit
 * when one intends to set it, for instance, trying to lock bitflags.
 * For instance, if one were to have waiters trying to set bitflag
 * and waiting for it to clear before setting it, one would call
 * wait_on_bit() in threads waiting to be able to set the bit.
 * One uses wait_on_bit_lock() where one is waiting for the bit to
 * clear with the intention of setting it, and when done, clearing it.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
{
        might_sleep();
        if (!test_and_set_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode);
}

/**
 * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to atomically set it. This is similar
 * to wait_on_bit(), but calls io_schedule() instead of schedule()
 * for the actual waiting.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
{
        might_sleep();
        if (!test_and_set_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode);
}

/**
 * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it
 * @word: the word being waited on, a kernel virtual address
 * @bit: the bit of the word being waited on
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Use the standard hashed waitqueue table to wait for a bit
 * to be cleared and then to set it, and allow the waiting action
 * to be specified.
 * This is like wait_on_bit() but allows fine control of how the waiting
 * is done.
 *
 * Returns zero if the bit was (eventually) found to be clear and was
 * set. Returns non-zero if a signal was delivered to the process and
 * the @mode allows that signal to wake the process.
 */
static inline int
wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
                        unsigned mode)
{
        might_sleep();
        if (!test_and_set_bit(bit, word))
                return 0;
        return out_of_line_wait_on_bit_lock(word, bit, action, mode);
}

/**
 * wait_on_atomic_t - Wait for an atomic_t to become 0
 * @val: The atomic value being waited on, a kernel virtual address
 * @action: the function used to sleep, which may take special actions
 * @mode: the task state to sleep in
 *
 * Wait for an atomic_t to become 0. We abuse the bit-wait waitqueue table for
 * the purpose of getting a waitqueue, but we set the key to a bit number
 * outside of the target 'word'.
 */
static inline
int wait_on_atomic_t(atomic_t *val, int (*action)(atomic_t *), unsigned mode)
{
        might_sleep();
        if (atomic_read(val) == 0)
                return 0;
        return out_of_line_wait_on_atomic_t(val, action, mode);
}

#endif /* _LINUX_WAIT_BIT_H */
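/*
 * Editorial aside (not part of the merged diff): a waiter/waker pair built on
 * the API declared in the new <linux/wait_bit.h>; the state word and bit
 * number are illustrative.
 */
#include <linux/wait_bit.h>
#include <linux/bitops.h>
#include <linux/sched.h>

#define EXAMPLE_BUSY_BIT        0

static int example_wait_until_idle(unsigned long *state)
{
        /* sleeps until EXAMPLE_BUSY_BIT is cleared; interruptible by signals */
        return wait_on_bit(state, EXAMPLE_BUSY_BIT, TASK_INTERRUPTIBLE);
}

static void example_mark_idle(unsigned long *state)
{
        clear_bit_unlock(EXAMPLE_BUSY_BIT, state);
        /* wake anyone sleeping in wait_on_bit() on this word/bit pair */
        wake_up_bit(state, EXAMPLE_BUSY_BIT);
}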
@@ -62,7 +62,7 @@ struct unix_sock {
 #define UNIX_GC_CANDIDATE       0
 #define UNIX_GC_MAYBE_CYCLE     1
         struct socket_wq        peer_wq;
-        wait_queue_t            peer_wake;
+        wait_queue_entry_t      peer_wake;
 };
 
 static inline struct unix_sock *unix_sk(const struct sock *sk)
@@ -26,7 +26,7 @@
 #define AUTOFS_MIN_PROTO_VERSION        AUTOFS_PROTO_VERSION
 
 /*
- * The wait_queue_token (autofs_wqt_t) is part of a structure which is passed
+ * The wait_queue_entry_token (autofs_wqt_t) is part of a structure which is passed
  * back to the kernel via ioctl from userspace. On architectures where 32- and
 * 64-bit userspace binaries can be executed it's important that the size of
 * autofs_wqt_t stays constant between 32- and 64-bit Linux kernels so that we
@@ -49,7 +49,7 @@ struct autofs_packet_hdr {
 
 struct autofs_packet_missing {
         struct autofs_packet_hdr hdr;
-        autofs_wqt_t wait_queue_token;
+        autofs_wqt_t wait_queue_entry_token;
         int len;
         char name[NAME_MAX+1];
 };
@@ -108,7 +108,7 @@ enum autofs_notify {
 /* v4 multi expire (via pipe) */
 struct autofs_packet_expire_multi {
         struct autofs_packet_hdr hdr;
-        autofs_wqt_t wait_queue_token;
+        autofs_wqt_t wait_queue_entry_token;
         int len;
         char name[NAME_MAX+1];
 };
@@ -123,7 +123,7 @@ union autofs_packet_union {
 /* autofs v5 common packet struct */
 struct autofs_v5_packet {
         struct autofs_packet_hdr hdr;
-        autofs_wqt_t wait_queue_token;
+        autofs_wqt_t wait_queue_entry_token;
         __u32 dev;
         __u64 ino;
         __u32 uid;
Some files were not shown because too many files have changed in this diff.