Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "20 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  m32r: fix build warning about putc
  mm: workingset: printk missing log level, use pr_info()
  mm: thp: refix false positive BUG in page_move_anon_rmap()
  mm: rmap: call page_check_address() with sync enabled to avoid racy check
  mm: thp: move pmd check inside ptl for freeze_page()
  vmlinux.lds: account for destructor sections
  gcov: add support for gcc version >= 6
  mm, meminit: ensure node is online before checking whether pages are uninitialised
  mm, meminit: always return a valid node from early_pfn_to_nid
  kasan/quarantine: fix bugs on qlist_move_cache()
  uapi: export lirc.h header
  madvise_free, thp: fix madvise_free_huge_pmd return value after splitting
  Revert "scripts/gdb: add documentation example for radix tree"
  Revert "scripts/gdb: add a Radix Tree Parser"
  scripts/gdb: Perform path expansion to lx-symbol's arguments
  scripts/gdb: add constants.py to .gitignore
  scripts/gdb: rebuild constants.py on dependency change
  scripts/gdb: silence 'nothing to do' message
  kasan: add newline to messages
  mm, compaction: prevent VM_BUG_ON when terminating freeing scanner
commit fa3a9f5744
@@ -139,27 +139,6 @@ Examples of using the Linux-provided gdb helpers
     start_comm = "swapper/2\000\000\000\000\000\000"
   }
 
- o Dig into a radix tree data structure, such as the IRQ descriptors:
-    (gdb) print (struct irq_desc)$lx_radix_tree_lookup(irq_desc_tree, 18)
-    $6 = {
-      irq_common_data = {
-        state_use_accessors = 67584,
-        handler_data = 0x0 <__vectors_start>,
-        msi_desc = 0x0 <__vectors_start>,
-        affinity = {{
-          bits = {65535}
-        }}
-      },
-      irq_data = {
-        mask = 0,
-        irq = 18,
-        hwirq = 27,
-        common = 0xee803d80,
-        chip = 0xc0eb0854 <gic_data>,
-        domain = 0xee808000,
-        parent_data = 0x0 <__vectors_start>,
-        chip_data = 0xc0eb0854 <gic_data>
-  } <... trimmed ...>
 
 List of commands and functions
 ------------------------------
@@ -8,12 +8,13 @@
 
 #include <asm/processor.h>
 
-static void putc(char c);
+static void m32r_putc(char c);
 
 static int puts(const char *s)
 {
         char c;
-        while ((c = *s++)) putc(c);
+        while ((c = *s++))
+                m32r_putc(c);
         return 0;
 }
 
@@ -41,7 +42,7 @@ static int puts(const char *s)
 #define BOOT_SIO0TXB PLD_ESIO0TXB
 #endif
 
-static void putc(char c)
+static void m32r_putc(char c)
 {
         while ((*BOOT_SIO0STS & 0x3) != 0x3)
                 cpu_relax();
@@ -61,7 +62,7 @@ static void putc(char c)
 #define SIO0TXB (volatile unsigned short *)(0x00efd000 + 30)
 #endif
 
-static void putc(char c)
+static void m32r_putc(char c)
 {
         while ((*SIO0STS & 0x1) == 0)
                 cpu_relax();
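The build warning fixed here comes from gcc treating putc() as a built-in function, so a private helper with a different signature clashes with it. A minimal userspace sketch of the same clash (illustrative only, not the kernel code; gcc typically warns even without including <stdio.h>):

    /* gcc knows putc() as a built-in, so this redeclaration usually
     * draws "conflicting types for built-in function 'putc'".
     * Renaming the helper, as the patch does with m32r_putc(),
     * avoids the clash entirely. */
    static void putc(char c)        /* warns: conflicts with built-in */
    {
        (void)c;
    }

    static void m32r_putc(char c)   /* no warning */
    {
        (void)c;
    }

    int main(void)
    {
        putc('x');
        m32r_putc('y');
        return 0;
    }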
@@ -54,8 +54,8 @@ static int kasan_die_handler(struct notifier_block *self,
                              void *data)
 {
         if (val == DIE_GPF) {
-                pr_emerg("CONFIG_KASAN_INLINE enabled");
-                pr_emerg("GPF could be caused by NULL-ptr deref or user memory access");
+                pr_emerg("CONFIG_KASAN_INLINE enabled\n");
+                pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
         }
         return NOTIFY_OK;
 }
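Without a trailing "\n", consecutive printk()-family messages are glued together on one line in the kernel log. A userspace sketch of the same effect, using printf as a stand-in:

    #include <stdio.h>

    int main(void)
    {
        /* Without the trailing '\n' the two messages run together on
         * one line -- the effect the added newlines prevent above. */
        printf("CONFIG_KASAN_INLINE enabled");
        printf("GPF could be caused by NULL-ptr deref or user memory access");
        printf("\n");
        return 0;
    }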
@@ -542,15 +542,19 @@
 
 #define INIT_TEXT \
         *(.init.text) \
+        *(.text.startup) \
         MEM_DISCARD(init.text)
 
 #define EXIT_DATA \
         *(.exit.data) \
+        *(.fini_array) \
+        *(.dtors) \
         MEM_DISCARD(exit.data) \
         MEM_DISCARD(exit.rodata)
 
 #define EXIT_TEXT \
         *(.exit.text) \
+        *(.text.exit) \
         MEM_DISCARD(exit.text)
 
 #define EXIT_CALL \
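The new entries cover the sections a compiler may emit for constructors and destructors (gcov and kasan builds generate such destructors). A hedged userspace example of where gcc commonly places them; exact placement varies by compiler version and flags:

    #include <stdio.h>

    /* With gcc, startup-only code tends to land in .text.startup;
     * destructor pointers go to .fini_array (or the older .dtors) and
     * their code can end up in .text.exit -- the sections this patch
     * adds to INIT_TEXT, EXIT_DATA and EXIT_TEXT. */
    __attribute__((constructor))
    static void ctor(void)
    {
        puts("constructor");
    }

    __attribute__((destructor))
    static void dtor(void)
    {
        puts("destructor");
    }

    int main(void)
    {
        puts("main");
        return 0;   /* dtor() runs after main() returns */
    }

Inspecting the object with objdump -h (or readelf -S) shows which of these sections a given compiler actually emitted.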
@@ -94,7 +94,7 @@ static inline int split_huge_page(struct page *page)
 void deferred_split_huge_page(struct page *page);
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-                unsigned long address, bool freeze);
+                unsigned long address, bool freeze, struct page *page);
 
 #define split_huge_pmd(__vma, __pmd, __address) \
         do { \
@@ -102,7 +102,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                 if (pmd_trans_huge(*____pmd) \
                                         || pmd_devmap(*____pmd)) \
                         __split_huge_pmd(__vma, __pmd, __address, \
-                                                false); \
+                                                false, NULL); \
         } while (0)
 
@@ -158,7 +158,7 @@ struct anon_vma *page_get_anon_vma(struct page *page);
 /*
  * rmap interfaces called when adding or removing pte of page
  */
-void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
+void page_move_anon_rmap(struct page *, struct vm_area_struct *);
 void page_add_anon_rmap(struct page *, struct vm_area_struct *,
                 unsigned long, bool);
 void do_page_add_anon_rmap(struct page *, struct vm_area_struct *,
@@ -245,6 +245,7 @@ endif
 header-y += hw_breakpoint.h
 header-y += l2tp.h
 header-y += libc-compat.h
+header-y += lirc.h
 header-y += limits.h
 header-y += llc.h
 header-y += loop.h
@@ -18,7 +18,7 @@
 #include <linux/vmalloc.h>
 #include "gcov.h"
 
-#if __GNUC__ == 5 && __GNUC_MINOR__ >= 1
+#if (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
 #define GCOV_COUNTERS 10
 #elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
 #define GCOV_COUNTERS 9
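The old guard matched only gcc 5.x, so gcc 6 silently fell through to an older counter layout. A quick standalone check of the corrected condition, mirroring the #if chain in gcc_4_7.c:

    #include <stdio.h>

    #if (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
    #define GCOV_COUNTERS 10
    #elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
    #define GCOV_COUNTERS 9
    #else
    #define GCOV_COUNTERS 8
    #endif

    int main(void)
    {
        /* Prints which counter layout the running compiler selects. */
        printf("gcc %d.%d -> GCOV_COUNTERS = %d\n",
               __GNUC__, __GNUC_MINOR__, GCOV_COUNTERS);
        return 0;
    }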
@@ -1009,8 +1009,6 @@ static void isolate_freepages(struct compact_control *cc)
                                 block_end_pfn = block_start_pfn,
                                 block_start_pfn -= pageblock_nr_pages,
                                 isolate_start_pfn = block_start_pfn) {
-                unsigned long isolated;
-
                 /*
                  * This can iterate a massively long zone without finding any
                  * suitable migration targets, so periodically check if we need
@@ -1034,36 +1032,30 @@ static void isolate_freepages(struct compact_control *cc)
                         continue;
 
                 /* Found a block suitable for isolating free pages from. */
-                isolated = isolate_freepages_block(cc, &isolate_start_pfn,
-                                        block_end_pfn, freelist, false);
-                /* If isolation failed early, do not continue needlessly */
-                if (!isolated && isolate_start_pfn < block_end_pfn &&
-                    cc->nr_migratepages > cc->nr_freepages)
-                        break;
+                isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
+                                        freelist, false);
 
                 /*
-                 * If we isolated enough freepages, or aborted due to async
-                 * compaction being contended, terminate the loop.
-                 * Remember where the free scanner should restart next time,
-                 * which is where isolate_freepages_block() left off.
-                 * But if it scanned the whole pageblock, isolate_start_pfn
-                 * now points at block_end_pfn, which is the start of the next
-                 * pageblock.
-                 * In that case we will however want to restart at the start
-                 * of the previous pageblock.
+                 * If we isolated enough freepages, or aborted due to lock
+                 * contention, terminate.
                  */
                 if ((cc->nr_freepages >= cc->nr_migratepages)
                                                         || cc->contended) {
-                        if (isolate_start_pfn >= block_end_pfn)
+                        if (isolate_start_pfn >= block_end_pfn) {
+                                /*
+                                 * Restart at previous pageblock if more
+                                 * freepages can be isolated next time.
+                                 */
                                 isolate_start_pfn =
                                         block_start_pfn - pageblock_nr_pages;
+                        }
                         break;
-                } else {
+                } else if (isolate_start_pfn < block_end_pfn) {
                         /*
-                         * isolate_freepages_block() should not terminate
-                         * prematurely unless contended, or isolated enough
+                         * If isolation failed early, do not continue
+                         * needlessly.
                          */
-                        VM_BUG_ON(isolate_start_pfn < block_end_pfn);
                         break;
                 }
         }
@@ -1624,14 +1624,9 @@ int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
         if (next - addr != HPAGE_PMD_SIZE) {
                 get_page(page);
                 spin_unlock(ptl);
-                if (split_huge_page(page)) {
-                        put_page(page);
-                        unlock_page(page);
-                        goto out_unlocked;
-                }
+                split_huge_page(page);
                 put_page(page);
                 unlock_page(page);
-                ret = 1;
                 goto out_unlocked;
         }
 
@@ -2989,7 +2984,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 }
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-                unsigned long address, bool freeze)
+                unsigned long address, bool freeze, struct page *page)
 {
         spinlock_t *ptl;
         struct mm_struct *mm = vma->vm_mm;
@@ -2997,8 +2992,17 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 
         mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
         ptl = pmd_lock(mm, pmd);
+
+        /*
+         * If caller asks to setup a migration entries, we need a page to check
+         * pmd against. Otherwise we can end up replacing wrong page.
+         */
+        VM_BUG_ON(freeze && !page);
+        if (page && page != pmd_page(*pmd))
+                goto out;
+
         if (pmd_trans_huge(*pmd)) {
-                struct page *page = pmd_page(*pmd);
+                page = pmd_page(*pmd);
                 if (PageMlocked(page))
                         clear_page_mlock(page);
         } else if (!pmd_devmap(*pmd))
@@ -3025,24 +3029,8 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
                 return;
 
         pmd = pmd_offset(pud, address);
-        if (!pmd_present(*pmd) || (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)))
-                return;
-
-        /*
-         * If caller asks to setup a migration entries, we need a page to check
-         * pmd against. Otherwise we can end up replacing wrong page.
-         */
-        VM_BUG_ON(freeze && !page);
-        if (page && page != pmd_page(*pmd))
-                return;
-
-        /*
-         * Caller holds the mmap_sem write mode or the anon_vma lock,
-         * so a huge pmd cannot materialize from under us (khugepaged
-         * holds both the mmap_sem write mode and the anon_vma lock
-         * write mode).
-         */
-        __split_huge_pmd(vma, pmd, address, freeze);
+        __split_huge_pmd(vma, pmd, address, freeze, page);
 }
 
 void vma_adjust_trans_huge(struct vm_area_struct *vma,
@@ -3383,7 +3383,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
         /* If no-one else is actually using this page, avoid the copy
          * and just make the page writable */
         if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
-                page_move_anon_rmap(old_page, vma, address);
+                page_move_anon_rmap(old_page, vma);
                 set_huge_ptep_writable(vma, address, ptep);
                 return 0;
         }
@@ -238,30 +238,23 @@ static void qlist_move_cache(struct qlist_head *from,
                                    struct qlist_head *to,
                                    struct kmem_cache *cache)
 {
-        struct qlist_node *prev = NULL, *curr;
+        struct qlist_node *curr;
 
         if (unlikely(qlist_empty(from)))
                 return;
 
         curr = from->head;
+        qlist_init(from);
         while (curr) {
-                struct qlist_node *qlink = curr;
-                struct kmem_cache *obj_cache = qlink_to_cache(qlink);
+                struct qlist_node *next = curr->next;
+                struct kmem_cache *obj_cache = qlink_to_cache(curr);
 
-                if (obj_cache == cache) {
-                        if (unlikely(from->head == qlink)) {
-                                from->head = curr->next;
-                                prev = curr;
-                        } else
-                                prev->next = curr->next;
-                        if (unlikely(from->tail == qlink))
-                                from->tail = curr->next;
-                        from->bytes -= cache->size;
-                        qlist_put(to, qlink, cache->size);
-                } else {
-                        prev = curr;
-                }
-                curr = curr->next;
+                if (obj_cache == cache)
+                        qlist_put(to, curr, obj_cache->size);
+                else
+                        qlist_put(from, curr, obj_cache->size);
+
+                curr = next;
         }
 }
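The rewritten loop drops the error-prone prev/head/tail bookkeeping: it detaches the whole source list up front and re-inserts every node into one of the two lists. A minimal userspace sketch of that pattern (names are illustrative, not the kernel's):

    #include <stddef.h>

    struct node { struct node *next; int cache_id; };
    struct list { struct node *head, *tail; };

    /* Append a node to a singly linked list with head/tail pointers. */
    static void list_put(struct list *l, struct node *n)
    {
        n->next = NULL;
        if (l->tail)
            l->tail->next = n;
        else
            l->head = n;
        l->tail = n;
    }

    /* Detach 'from' completely, then walk the old chain once and
     * re-insert each node; head/tail can never be left dangling. */
    static void move_cache(struct list *from, struct list *to, int id)
    {
        struct node *curr = from->head;

        from->head = from->tail = NULL;   /* like qlist_init(from) */
        while (curr) {
            struct node *next = curr->next;

            if (curr->cache_id == id)
                list_put(to, curr);
            else
                list_put(from, curr);
            curr = next;
        }
    }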
@@ -2399,8 +2399,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                                  * Protected against the rmap code by
                                  * the page lock.
                                  */
-                                page_move_anon_rmap(compound_head(old_page),
-                                                    vma, address);
+                                page_move_anon_rmap(old_page, vma);
                         }
                         unlock_page(old_page);
                         return wp_page_reuse(mm, vma, address, page_table, ptl,
@@ -286,7 +286,9 @@ static inline void reset_deferred_meminit(pg_data_t *pgdat)
 /* Returns true if the struct page for the pfn is uninitialised */
 static inline bool __meminit early_page_uninitialised(unsigned long pfn)
 {
-        if (pfn >= NODE_DATA(early_pfn_to_nid(pfn))->first_deferred_pfn)
+        int nid = early_pfn_to_nid(pfn);
+
+        if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
                 return true;
 
         return false;
@@ -1273,7 +1275,7 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
         spin_lock(&early_pfn_lock);
         nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
         if (nid < 0)
-                nid = 0;
+                nid = first_online_node;
         spin_unlock(&early_pfn_lock);
 
         return nid;
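Both hunks close the same hole: early_pfn_to_nid() could hand back node 0 even when node 0 is offline, and the caller then dereferenced per-node data for it. A hedged sketch of the combined logic, with array stand-ins for the kernel's node state (not the real API):

    #include <stdbool.h>

    #define MAX_NUMNODES 8

    /* Hypothetical stand-ins: which nodes are online, and per-node
     * data that is only valid for online nodes. */
    static bool online[MAX_NUMNODES] = { [1] = true, [2] = true };
    static unsigned long first_deferred_pfn[MAX_NUMNODES];

    static int first_online_node(void)
    {
        for (int nid = 0; nid < MAX_NUMNODES; nid++)
            if (online[nid])
                return nid;
        return 0;
    }

    /* A failed pfn->nid lookup (nid < 0) now falls back to the first
     * online node instead of a possibly-offline node 0, and per-node
     * data is only touched after the online check. */
    static bool page_uninitialised(int nid, unsigned long pfn)
    {
        if (nid < 0)
            nid = first_online_node();
        return online[nid] && pfn >= first_deferred_pfn[nid];
    }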
mm/rmap.c (12 changed lines):
@@ -1084,23 +1084,20 @@ EXPORT_SYMBOL_GPL(page_mkclean);
  * page_move_anon_rmap - move a page to our anon_vma
  * @page: the page to move to our anon_vma
  * @vma: the vma the page belongs to
- * @address: the user virtual address mapped
  *
  * When a page belongs exclusively to one process after a COW event,
  * that page can be moved into the anon_vma that belongs to just that
  * process, so the rmap code will not search the parent or sibling
  * processes.
  */
-void page_move_anon_rmap(struct page *page,
-        struct vm_area_struct *vma, unsigned long address)
+void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
 {
         struct anon_vma *anon_vma = vma->anon_vma;
 
+        page = compound_head(page);
+
         VM_BUG_ON_PAGE(!PageLocked(page), page);
         VM_BUG_ON_VMA(!anon_vma, vma);
-        if (IS_ENABLED(CONFIG_DEBUG_VM) && PageTransHuge(page))
-                address &= HPAGE_PMD_MASK;
-        VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page);
 
         anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
         /*
@@ -1427,7 +1424,8 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                 goto out;
         }
 
-        pte = page_check_address(page, mm, address, &ptl, 0);
+        pte = page_check_address(page, mm, address, &ptl,
+                                 PageTransCompound(page));
         if (!pte)
                 goto out;
 
@@ -491,7 +491,7 @@ static int __init workingset_init(void)
         max_order = fls_long(totalram_pages - 1);
         if (max_order > timestamp_bits)
                 bucket_order = max_order - timestamp_bits;
-        printk("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
+        pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
                timestamp_bits, max_order, bucket_order);
 
         ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
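A bare printk() without a KERN_* prefix is logged at the default level; pr_info() pins the message to KERN_INFO. A userspace approximation of the macro (the "<6>" prefix mirrors the kernel's level encoding; this is a sketch with made-up values, not the kernel header):

    #include <stdio.h>

    #define KERN_INFO "<6>"
    #define pr_info(fmt, ...) printf(KERN_INFO fmt, ##__VA_ARGS__)

    int main(void)
    {
        /* Example values only -- the real ones depend on the machine. */
        pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
                14, 22, 8);
        return 0;
    }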
scripts/gdb/linux/.gitignore (1 changed line):
@@ -1,2 +1,3 @@
 *.pyc
 *.pyo
+constants.py
@@ -13,9 +13,11 @@ quiet_cmd_gen_constants_py = GEN $@
 	$(CPP) -E -x c -P $(c_flags) $< > $@ ;\
 	sed -i '1,/<!-- end-c-headers -->/d;' $@
 
-$(obj)/constants.py: $(SRCTREE)/$(obj)/constants.py.in
-	$(call if_changed,gen_constants_py)
+targets += constants.py
+$(obj)/constants.py: $(SRCTREE)/$(obj)/constants.py.in FORCE
+	$(call if_changed_dep,gen_constants_py)
 
 build_constants_py: $(obj)/constants.py
+	@:
 
 clean-files := *.pyc *.pyo $(if $(KBUILD_SRC),*.py) $(obj)/constants.py
@@ -14,7 +14,6 @@
 
 #include <linux/fs.h>
 #include <linux/mount.h>
-#include <linux/radix-tree.h>
 
 /* We need to stringify expanded macros so that they can be parsed */
 
@@ -51,9 +50,3 @@ LX_VALUE(MNT_NOEXEC)
 LX_VALUE(MNT_NOATIME)
 LX_VALUE(MNT_NODIRATIME)
 LX_VALUE(MNT_RELATIME)
-
-/* linux/radix-tree.h */
-LX_VALUE(RADIX_TREE_INDIRECT_PTR)
-LX_GDBPARSED(RADIX_TREE_HEIGHT_MASK)
-LX_GDBPARSED(RADIX_TREE_MAP_SHIFT)
-LX_GDBPARSED(RADIX_TREE_MAP_MASK)
@@ -1,97 +0,0 @@
-#
-# gdb helper commands and functions for Linux kernel debugging
-#
-#  Radix Tree Parser
-#
-# Copyright (c) 2016 Linaro Ltd
-#
-# Authors:
-#  Kieran Bingham <kieran.bingham@linaro.org>
-#
-# This work is licensed under the terms of the GNU GPL version 2.
-#
-
-import gdb
-
-from linux import utils
-from linux import constants
-
-radix_tree_root_type = utils.CachedType("struct radix_tree_root")
-radix_tree_node_type = utils.CachedType("struct radix_tree_node")
-
-
-def is_indirect_ptr(node):
-    long_type = utils.get_long_type()
-    return (node.cast(long_type) & constants.LX_RADIX_TREE_INDIRECT_PTR)
-
-
-def indirect_to_ptr(node):
-    long_type = utils.get_long_type()
-    node_type = node.type
-    indirect_ptr = node.cast(long_type) & ~constants.LX_RADIX_TREE_INDIRECT_PTR
-    return indirect_ptr.cast(node_type)
-
-
-def maxindex(height):
-    height = height & constants.LX_RADIX_TREE_HEIGHT_MASK
-    return gdb.parse_and_eval("height_to_maxindex["+str(height)+"]")
-
-
-def lookup(root, index):
-    if root.type == radix_tree_root_type.get_type().pointer():
-        root = root.dereference()
-    elif root.type != radix_tree_root_type.get_type():
-        raise gdb.GdbError("Must be struct radix_tree_root not {}"
-                           .format(root.type))
-
-    node = root['rnode']
-    if node is 0:
-        return None
-
-    if not (is_indirect_ptr(node)):
-        if (index > 0):
-            return None
-        return node
-
-    node = indirect_to_ptr(node)
-
-    height = node['path'] & constants.LX_RADIX_TREE_HEIGHT_MASK
-    if (index > maxindex(height)):
-        return None
-
-    shift = (height-1) * constants.LX_RADIX_TREE_MAP_SHIFT
-
-    while True:
-        new_index = (index >> shift) & constants.LX_RADIX_TREE_MAP_MASK
-        slot = node['slots'][new_index]
-
-        node = slot.cast(node.type.pointer()).dereference()
-        if node is 0:
-            return None
-
-        shift -= constants.LX_RADIX_TREE_MAP_SHIFT
-        height -= 1
-
-        if (height <= 0):
-            break
-
-    return node
-
-
-class LxRadixTree(gdb.Function):
-    """ Lookup and return a node from a RadixTree.
-
-$lx_radix_tree_lookup(root_node [, index]): Return the node at the given index.
-If index is omitted, the root node is dereferenced and returned."""
-
-    def __init__(self):
-        super(LxRadixTree, self).__init__("lx_radix_tree_lookup")
-
-    def invoke(self, root, index=0):
-        result = lookup(root, index)
-        if result is None:
-            raise gdb.GdbError("No entry in tree at index {}".format(index))
-
-        return result
-
-LxRadixTree()
@@ -153,7 +153,7 @@ lx-symbols command."""
             saved_state['breakpoint'].enabled = saved_state['enabled']
 
     def invoke(self, arg, from_tty):
-        self.module_paths = arg.split()
+        self.module_paths = [os.path.expanduser(p) for p in arg.split()]
        self.module_paths.append(os.getcwd())
 
         # enforce update
@@ -31,4 +31,3 @@ else:
     import linux.lists
     import linux.proc
     import linux.constants
-    import linux.radixtree