USB: EHCI: convert singly-linked lists to list_heads
This patch (as1664) converts ehci-hcd's async_unlink, async_iaa, and
intr_unlink from singly-linked lists to standard doubly-linked list_heads.

Originally it didn't seem necessary to use list_heads, because items are
always added to and removed from these lists in FIFO order.  But now, with
more list processing going on, it's easier to use the standard routines
than to continue with a roll-your-own approach.  I don't know if the code
ends up being notably shorter, but the patterns will be more familiar to
any kernel hacker.

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 6e018751a3
parent 7655e3160c
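To make the conversion pattern concrete before the diff: below is a minimal,
self-contained userspace sketch of the before/after idiom.  The list helpers
are simplified reimplementations of the <linux/list.h> primitives (the real
kernel versions add poisoning and type-checking), and struct qh is a stand-in
for struct ehci_qh; none of this code appears in the patch itself.

#include <stddef.h>
#include <stdio.h>

/* Simplified list_head primitives, after <linux/list.h> */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *head)
{
        head->next = head->prev = head;
}

static int list_empty(const struct list_head *head)
{
        return head->next == head;
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
}

static void list_del(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
}

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_first_entry(head, type, member) \
        container_of((head)->next, type, member)

/* Stand-in for struct ehci_qh: the list node is embedded in the
 * structure, replacing the old "struct ehci_qh *unlink_next" pointer. */
struct qh {
        int id;
        struct list_head unlink_node;
};

int main(void)
{
        struct list_head async_unlink;
        struct qh a = { .id = 1 }, b = { .id = 2 };

        INIT_LIST_HEAD(&async_unlink);          /* as in ehci_init() */

        /* single_unlink_async(): one call replaces the old five-line
         * "append to tail, tracking async_unlink_last" sequence. */
        list_add_tail(&a.unlink_node, &async_unlink);
        list_add_tail(&b.unlink_node, &async_unlink);

        /* end_unlink_async(): pop in FIFO order until empty, as the
         * patched while-loop does. */
        while (!list_empty(&async_unlink)) {
                struct qh *qh = list_first_entry(&async_unlink,
                                                 struct qh, unlink_node);
                list_del(&qh->unlink_node);
                printf("unlinked qh %d\n", qh->id);
        }
        return 0;
}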
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -510,14 +510,16 @@ static ssize_t fill_async_buffer(struct debug_buffer *buf)
 	spin_lock_irqsave (&ehci->lock, flags);
 	for (qh = ehci->async->qh_next.qh; size > 0 && qh; qh = qh->qh_next.qh)
 		qh_lines (ehci, qh, &next, &size);
-	if (ehci->async_unlink && size > 0) {
+	if (!list_empty(&ehci->async_unlink) && size > 0) {
 		temp = scnprintf(next, size, "\nunlink =\n");
 		size -= temp;
 		next += temp;
 
-		for (qh = ehci->async_unlink; size > 0 && qh;
-				qh = qh->unlink_next)
-			qh_lines (ehci, qh, &next, &size);
+		list_for_each_entry(qh, &ehci->async_unlink, unlink_node) {
+			if (size <= 0)
+				break;
+			qh_lines(ehci, qh, &next, &size);
+		}
 	}
 	spin_unlock_irqrestore (&ehci->lock, flags);
@@ -814,9 +816,10 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
 		}
 	}
 
-	if (ehci->async_unlink) {
+	if (!list_empty(&ehci->async_unlink)) {
 		temp = scnprintf(next, size, "async unlink qh %p\n",
-				ehci->async_unlink);
+				list_first_entry(&ehci->async_unlink,
+						struct ehci_qh, unlink_node));
 		size -= temp;
 		next += temp;
 	}
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -482,6 +482,9 @@ static int ehci_init(struct usb_hcd *hcd)
 	 * periodic_size can shrink by USBCMD update if hcc_params allows.
 	 */
 	ehci->periodic_size = DEFAULT_I_TDPS;
+	INIT_LIST_HEAD(&ehci->async_unlink);
+	INIT_LIST_HEAD(&ehci->async_iaa);
+	INIT_LIST_HEAD(&ehci->intr_unlink);
 	INIT_LIST_HEAD(&ehci->intr_qh_list);
 	INIT_LIST_HEAD(&ehci->cached_itd_list);
 	INIT_LIST_HEAD(&ehci->cached_sitd_list);
@@ -749,7 +752,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
 		/* guard against (alleged) silicon errata */
 		if (cmd & CMD_IAAD)
 			ehci_dbg(ehci, "IAA with IAAD still set?\n");
-		if (ehci->async_iaa)
+		if (!list_empty(&ehci->async_iaa))
 			COUNT(ehci->stats.iaa);
 		end_unlink_async(ehci);
 	}
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -958,8 +958,9 @@ static void disable_async(struct ehci_hcd *ehci)
 	if (--ehci->async_count)
 		return;
 
-	/* The async schedule and async_unlink list are supposed to be empty */
-	WARN_ON(ehci->async->qh_next.qh || ehci->async_unlink);
+	/* The async schedule and unlink lists are supposed to be empty */
+	WARN_ON(ehci->async->qh_next.qh || !list_empty(&ehci->async_unlink) ||
+			!list_empty(&ehci->async_iaa));
 
 	/* Don't turn off the schedule until ASS is 1 */
 	ehci_poll_ASS(ehci);
@@ -1150,11 +1151,7 @@ static void single_unlink_async(struct ehci_hcd *ehci, struct ehci_qh *qh)
 
 	/* Add to the end of the list of QHs waiting for the next IAAD */
 	qh->qh_state = QH_STATE_UNLINK_WAIT;
-	if (ehci->async_unlink)
-		ehci->async_unlink_last->unlink_next = qh;
-	else
-		ehci->async_unlink = qh;
-	ehci->async_unlink_last = qh;
+	list_add_tail(&qh->unlink_node, &ehci->async_unlink);
 
 	/* Unlink it from the schedule */
 	prev = ehci->async;
@@ -1173,15 +1170,14 @@ static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested)
 	 * Do nothing if an IAA cycle is already running or
 	 * if one will be started shortly.
 	 */
-	if (ehci->async_iaa || ehci->async_unlinking)
+	if (!list_empty(&ehci->async_iaa) || ehci->async_unlinking)
 		return;
 
 	/* If the controller isn't running, we don't have to wait for it */
 	if (unlikely(ehci->rh_state < EHCI_RH_RUNNING)) {
 
 		/* Do all the waiting QHs */
-		ehci->async_iaa = ehci->async_unlink;
-		ehci->async_unlink = NULL;
+		list_splice_tail_init(&ehci->async_unlink, &ehci->async_iaa);
 
 		if (!nested)		/* Avoid recursion */
 			end_unlink_async(ehci);
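Aside: list_splice_tail_init(), used above, is the list_head replacement for
the removed two-line pointer hand-off — it moves every waiting QH onto the
tail of async_iaa and leaves async_unlink empty.  A simplified version,
written against the helpers in the earlier sketch rather than the kernel's
actual implementation:

/* Move all of @src onto the tail of @dst, leaving @src empty --
 * the list_head analogue of "iaa = unlink; unlink = NULL;". */
static void list_splice_tail_init(struct list_head *src,
                                  struct list_head *dst)
{
        if (!list_empty(src)) {
                struct list_head *first = src->next;
                struct list_head *last = src->prev;

                first->prev = dst->prev;
                dst->prev->next = first;
                last->next = dst;
                dst->prev = last;
                INIT_LIST_HEAD(src);
        }
}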
@@ -1191,20 +1187,18 @@ static void start_iaa_cycle(struct ehci_hcd *ehci, bool nested)
 		struct ehci_qh	*qh;
 
 		/* Do only the first waiting QH (nVidia bug?) */
-		qh = ehci->async_unlink;
+		qh = list_first_entry(&ehci->async_unlink, struct ehci_qh,
+				unlink_node);
 
 		/*
 		 * Intel (?) bug: The HC can write back the overlay region
 		 * even after the IAA interrupt occurs.  In self-defense,
 		 * always go through two IAA cycles for each QH.
 		 */
-		if (qh->qh_state == QH_STATE_UNLINK_WAIT) {
+		if (qh->qh_state == QH_STATE_UNLINK_WAIT)
 			qh->qh_state = QH_STATE_UNLINK;
-		} else {
-			ehci->async_iaa = qh;
-			ehci->async_unlink = qh->unlink_next;
-			qh->unlink_next = NULL;
-		}
+		else
+			list_move_tail(&qh->unlink_node, &ehci->async_iaa);
 
 		/* Make sure the unlinks are all visible to the hardware */
 		wmb();
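Similarly, list_move_tail() collapses the removed three-line hand-off of a
single QH: detach the entry from whatever list it is on, then append it to
the destination.  In terms of the earlier sketch's helpers:

/* Detach @entry from its current list and append it to @head --
 * here, moving one QH from async_unlink onto async_iaa. */
static void list_move_tail(struct list_head *entry, struct list_head *head)
{
        list_del(entry);
        list_add_tail(entry, head);
}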
@@ -1229,10 +1223,10 @@ static void end_unlink_async(struct ehci_hcd *ehci)
 	/* Process the idle QHs */
  restart:
 	ehci->async_unlinking = true;
-	while (ehci->async_iaa) {
-		qh = ehci->async_iaa;
-		ehci->async_iaa = qh->unlink_next;
-		qh->unlink_next = NULL;
+	while (!list_empty(&ehci->async_iaa)) {
+		qh = list_first_entry(&ehci->async_iaa, struct ehci_qh,
+				unlink_node);
+		list_del(&qh->unlink_node);
 
 		qh->qh_state = QH_STATE_IDLE;
 		qh->qh_next.qh = NULL;
@@ -1247,7 +1241,7 @@ static void end_unlink_async(struct ehci_hcd *ehci)
 	ehci->async_unlinking = false;
 
 	/* Start a new IAA cycle if any QHs are waiting for it */
-	if (ehci->async_unlink) {
+	if (!list_empty(&ehci->async_unlink)) {
 		start_iaa_cycle(ehci, true);
 		if (unlikely(ehci->rh_state < EHCI_RH_RUNNING))
 			goto restart;
@@ -1276,7 +1270,8 @@ static void unlink_empty_async(struct ehci_hcd *ehci)
 	}
 
 	/* If nothing else is being unlinked, unlink the last empty QH */
-	if (!ehci->async_iaa && !ehci->async_unlink && qh_to_unlink) {
+	if (list_empty(&ehci->async_iaa) && list_empty(&ehci->async_unlink) &&
+			qh_to_unlink) {
 		start_unlink_async(ehci, qh_to_unlink);
 		--count;
 	}
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -620,17 +620,13 @@ static void start_unlink_intr(struct ehci_hcd *ehci, struct ehci_qh *qh)
 	qh->unlink_cycle = ehci->intr_unlink_cycle;
 
 	/* New entries go at the end of the intr_unlink list */
-	if (ehci->intr_unlink)
-		ehci->intr_unlink_last->unlink_next = qh;
-	else
-		ehci->intr_unlink = qh;
-	ehci->intr_unlink_last = qh;
+	list_add_tail(&qh->unlink_node, &ehci->intr_unlink);
 
 	if (ehci->intr_unlinking)
 		;	/* Avoid recursive calls */
 	else if (ehci->rh_state < EHCI_RH_RUNNING)
 		ehci_handle_intr_unlinks(ehci);
-	else if (ehci->intr_unlink == qh) {
+	else if (ehci->intr_unlink.next == &qh->unlink_node) {
 		ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
 		++ehci->intr_unlink_cycle;
 	}
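One subtlety in the hunk above: the old test for "qh heads the queue",
ehci->intr_unlink == qh, becomes a comparison against qh's embedded node,
because the list links now live inside the QH.  Later kernels provide
list_is_first() for this.  A hypothetical helper (the name is illustrative,
not from the patch), using the simplified types from the first sketch:

/* On a non-empty list, head->next is the first entry's embedded node,
 * so this tests "qh is at the front of the intr_unlink queue". */
static int qh_is_first(struct list_head *intr_unlink, struct qh *qh)
{
        return intr_unlink->next == &qh->unlink_node;
}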
--- a/drivers/usb/host/ehci-timer.c
+++ b/drivers/usb/host/ehci-timer.c
@@ -229,18 +229,19 @@ static void ehci_handle_intr_unlinks(struct ehci_hcd *ehci)
 	 * process all the QHs on the list.
 	 */
 	ehci->intr_unlinking = true;
-	while (ehci->intr_unlink) {
-		struct ehci_qh	*qh = ehci->intr_unlink;
+	while (!list_empty(&ehci->intr_unlink)) {
+		struct ehci_qh	*qh;
 
+		qh = list_first_entry(&ehci->intr_unlink, struct ehci_qh,
+				unlink_node);
 		if (!stopped && qh->unlink_cycle == ehci->intr_unlink_cycle)
 			break;
-		ehci->intr_unlink = qh->unlink_next;
-		qh->unlink_next = NULL;
+		list_del(&qh->unlink_node);
 		end_unlink_intr(ehci, qh);
 	}
 
 	/* Handle remaining entries later */
-	if (ehci->intr_unlink) {
+	if (!list_empty(&ehci->intr_unlink)) {
 		ehci_enable_event(ehci, EHCI_HRTIMER_UNLINK_INTR, true);
 		++ehci->intr_unlink_cycle;
 	}
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -128,9 +128,8 @@ struct ehci_hcd {			/* one per controller */
 	/* async schedule support */
 	struct ehci_qh		*async;
 	struct ehci_qh		*dummy;		/* For AMD quirk use */
-	struct ehci_qh		*async_unlink;
-	struct ehci_qh		*async_unlink_last;
-	struct ehci_qh		*async_iaa;
+	struct list_head	async_unlink;
+	struct list_head	async_iaa;
 	unsigned		async_unlink_cycle;
 	unsigned		async_count;	/* async activity count */
 
@@ -143,8 +142,7 @@ struct ehci_hcd {			/* one per controller */
 	unsigned		i_thresh;	/* uframes HC might cache */
 
 	union ehci_shadow	*pshadow;	/* mirror hw periodic table */
-	struct ehci_qh		*intr_unlink;
-	struct ehci_qh		*intr_unlink_last;
+	struct list_head	intr_unlink;
 	unsigned		intr_unlink_cycle;
 	unsigned		now_frame;	/* frame from HC hardware */
 	unsigned		last_iso_frame;	/* last frame scanned for iso */
@@ -380,7 +378,7 @@ struct ehci_qh {
 	struct list_head	qtd_list;	/* sw qtd list */
 	struct list_head	intr_node;	/* list of intr QHs */
 	struct ehci_qtd		*dummy;
-	struct ehci_qh		*unlink_next;	/* next on unlink list */
+	struct list_head	unlink_node;
 
 	unsigned		unlink_cycle;
 
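The ehci.h change above is the crux of the conversion: the QH no longer
carries a raw unlink_next pointer; it embeds a list_head node, and helpers
like list_first_entry() recover the containing ehci_qh from that node via
container_of() pointer arithmetic.  A minimal illustration, again with the
simplified types from the first sketch:

/* list_first_entry() in slow motion: head->next points at the first
 * entry's embedded unlink_node; subtracting that member's offset
 * yields the enclosing struct qh. */
static struct qh *first_qh(struct list_head *head)
{
        return container_of(head->next, struct qh, unlink_node);
}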