Mirror of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-09-18 22:14:16 +00:00)
Merge branch 'common/clkfwk' into sh-latest
commit 33cd5cffd5
21 changed files with 265 additions and 311 deletions
arch/sparc/kernel/leon_pci.c

@@ -45,7 +45,6 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
 void __devinit pcibios_fixup_bus(struct pci_bus *pbus)
 {
-	struct leon_pci_info *info = pbus->sysdata;
 	struct pci_dev *dev;
 	int i, has_io, has_mem;
 	u16 cmd;
@@ -111,18 +110,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
 	return pci_enable_resources(dev, mask);
 }
 
-struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
-{
-	/*
-	 * Currently the OpenBoot nodes are not connected with the PCI device,
-	 * this is because the LEON PROM does not create PCI nodes. Eventually
-	 * this will change and the same approach as pcic.c can be used to
-	 * match PROM nodes with pci devices.
-	 */
-	return NULL;
-}
-EXPORT_SYMBOL(pci_device_to_OF_node);
-
 void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
 {
 #ifdef CONFIG_PCI_DEBUG
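The pci_device_to_OF_node() stub removed here is redundant: since the PCI/OF unification work, include/linux/pci.h provides a generic inline that resolves the node from struct device. A sketch of that generic helper (quoted from memory for kernels of this era, so treat the exact form as an approximation):

	static inline struct device_node *
	pci_device_to_OF_node(const struct pci_dev *pdev)
	{
		return pdev ? pdev->dev.of_node : NULL;
	}

On LEON the PROM does not create PCI nodes yet, so both the old stub and the generic helper end up returning NULL there.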
arch/sparc/mm/fault_32.c

@@ -225,6 +225,8 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	unsigned long g2;
 	int from_user = !(regs->psr & PSR_PS);
 	int fault, code;
+	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+			      (write ? FAULT_FLAG_WRITE : 0));
 
 	if(text_fault)
 		address = regs->pc;
@@ -251,6 +253,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
+retry:
 	down_read(&mm->mmap_sem);
 
 	/*
@@ -289,7 +292,11 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -297,13 +304,29 @@ good_area:
 		goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR) {
-		current->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-	} else {
-		current->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
-	}
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			current->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
+				      1, regs, address);
+		} else {
+			current->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
+				      1, regs, address);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
+
 	up_read(&mm->mmap_sem);
 	return;
 
arch/sparc/mm/fault_64.c

@@ -279,6 +279,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	unsigned int insn = 0;
 	int si_code, fault_code, fault;
 	unsigned long address, mm_rss;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	fault_code = get_thread_fault_code();
 
@@ -333,6 +334,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 			insn = get_fault_insn(regs, insn);
 			goto handle_kernel_fault;
 		}
+
+retry:
 		down_read(&mm->mmap_sem);
 	}
 
@@ -423,7 +426,12 @@ good_area:
 			goto bad_area;
 	}
 
-	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
+	flags |= ((fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -431,12 +439,27 @@ good_area:
 		goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR) {
-		current->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-	} else {
-		current->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
-	}
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			current->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
+				      1, regs, address);
+		} else {
+			current->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
+				      1, regs, address);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
+	}
 	up_read(&mm->mmap_sem);
 
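Both sparc fault handlers above switch to the common fault-retry protocol: the first call to handle_mm_fault() carries FAULT_FLAG_ALLOW_RETRY, and on VM_FAULT_RETRY the handler clears that flag and loops back to retake mmap_sem, so the fault can block on page I/O without pinning the semaphore. Distilled from the two hunks above (a schematic, not tied to either file):

	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

retry:
	down_read(&mm->mmap_sem);
	/* ... find and validate the vma ... */
	fault = handle_mm_fault(mm, vma, address, flags);

	/* on a retried fault with a fatal signal, mmap_sem is already gone */
	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
		return;

	if (fault & VM_FAULT_RETRY) {
		flags &= ~FAULT_FLAG_ALLOW_RETRY;	/* retry at most once */
		/* __lock_page_or_retry() dropped mmap_sem for us */
		goto retry;
	}
	up_read(&mm->mmap_sem);

The maj_flt/min_flt accounting moves under the FAULT_FLAG_ALLOW_RETRY check so a fault that is retried gets counted once, not twice.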
arch/um/drivers/cow.h

@@ -3,41 +3,6 @@
 
 #include <asm/types.h>
 
-#if defined(__KERNEL__)
-
-# include <asm/byteorder.h>
-
-# if defined(__BIG_ENDIAN)
-#  define ntohll(x) (x)
-#  define htonll(x) (x)
-# elif defined(__LITTLE_ENDIAN)
-#  define ntohll(x) be64_to_cpu(x)
-#  define htonll(x) cpu_to_be64(x)
-# else
-#  error "Could not determine byte order"
-# endif
-
-#else
-/* For the definition of ntohl, htonl and __BYTE_ORDER */
-#include <endian.h>
-#include <netinet/in.h>
-#if defined(__BYTE_ORDER)
-
-# if __BYTE_ORDER == __BIG_ENDIAN
-#  define ntohll(x) (x)
-#  define htonll(x) (x)
-# elif __BYTE_ORDER == __LITTLE_ENDIAN
-#  define ntohll(x) bswap_64(x)
-#  define htonll(x) bswap_64(x)
-# else
-#  error "Could not determine byte order: __BYTE_ORDER uncorrectly defined"
-# endif
-
-#else /* ! defined(__BYTE_ORDER) */
-# error "Could not determine byte order: __BYTE_ORDER not defined"
-#endif
-#endif /* ! defined(__KERNEL__) */
-
 extern int init_cow_file(int fd, char *cow_file, char *backing_file,
			 int sectorsize, int alignment, int *bitmap_offset_out,
			 unsigned long *bitmap_len_out, int *data_offset_out);
arch/um/drivers/cow_user.c

@@ -8,11 +8,10 @@
  * that.
  */
 #include <unistd.h>
-#include <byteswap.h>
 #include <errno.h>
 #include <string.h>
-#include <arpa/inet.h>
 #include <asm/types.h>
+#include <endian.h>
 #include "cow.h"
 #include "cow_sys.h"
 
@@ -214,8 +213,8 @@ int write_cow_header(char *cow_file, int fd, char *backing_file,
 			   "header\n");
 		goto out;
 	}
-	header->magic = htonl(COW_MAGIC);
-	header->version = htonl(COW_VERSION);
+	header->magic = htobe32(COW_MAGIC);
+	header->version = htobe32(COW_VERSION);
 
 	err = -EINVAL;
 	if (strlen(backing_file) > sizeof(header->backing_file) - 1) {
@@ -246,10 +245,10 @@ int write_cow_header(char *cow_file, int fd, char *backing_file,
 		goto out_free;
 	}
 
-	header->mtime = htonl(modtime);
-	header->size = htonll(*size);
-	header->sectorsize = htonl(sectorsize);
-	header->alignment = htonl(alignment);
+	header->mtime = htobe32(modtime);
+	header->size = htobe64(*size);
+	header->sectorsize = htobe32(sectorsize);
+	header->alignment = htobe32(alignment);
 	header->cow_format = COW_BITMAP;
 
 	err = cow_write_file(fd, header, sizeof(*header));
@@ -301,8 +300,8 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
 	magic = header->v1.magic;
 	if (magic == COW_MAGIC)
 		version = header->v1.version;
-	else if (magic == ntohl(COW_MAGIC))
-		version = ntohl(header->v1.version);
+	else if (magic == be32toh(COW_MAGIC))
+		version = be32toh(header->v1.version);
 	/* No error printed because the non-COW case comes through here */
 	else goto out;
 
@@ -327,9 +326,9 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
 			   "header\n");
 		goto out;
 	}
-	*mtime_out = ntohl(header->v2.mtime);
-	*size_out = ntohll(header->v2.size);
-	*sectorsize_out = ntohl(header->v2.sectorsize);
+	*mtime_out = be32toh(header->v2.mtime);
+	*size_out = be64toh(header->v2.size);
+	*sectorsize_out = be32toh(header->v2.sectorsize);
 	*bitmap_offset_out = sizeof(header->v2);
 	*align_out = *sectorsize_out;
 	file = header->v2.backing_file;
@@ -341,10 +340,10 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
 			   "header\n");
 		goto out;
 	}
-	*mtime_out = ntohl(header->v3.mtime);
-	*size_out = ntohll(header->v3.size);
-	*sectorsize_out = ntohl(header->v3.sectorsize);
-	*align_out = ntohl(header->v3.alignment);
+	*mtime_out = be32toh(header->v3.mtime);
+	*size_out = be64toh(header->v3.size);
+	*sectorsize_out = be32toh(header->v3.sectorsize);
+	*align_out = be32toh(header->v3.alignment);
 	if (*align_out == 0) {
 		cow_printf("read_cow_header - invalid COW header, "
 			   "align == 0\n");
@@ -366,16 +365,16 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
 	 * this was used until Dec2005 - 64bits are needed to represent
 	 * 2038+. I.e. we can safely do this truncating cast.
 	 *
-	 * Additionally, we must use ntohl() instead of ntohll(), since
+	 * Additionally, we must use be32toh() instead of be64toh(), since
 	 * the program used to use the former (tested - I got mtime
 	 * mismatch "0 vs whatever").
 	 *
 	 * Ever heard about bug-to-bug-compatibility ? ;-) */
-	*mtime_out = (time32_t) ntohl(header->v3_b.mtime);
+	*mtime_out = (time32_t) be32toh(header->v3_b.mtime);
 
-	*size_out = ntohll(header->v3_b.size);
-	*sectorsize_out = ntohl(header->v3_b.sectorsize);
-	*align_out = ntohl(header->v3_b.alignment);
+	*size_out = be64toh(header->v3_b.size);
+	*sectorsize_out = be32toh(header->v3_b.sectorsize);
+	*align_out = be32toh(header->v3_b.alignment);
 	if (*align_out == 0) {
 		cow_printf("read_cow_header - invalid COW header, "
 			   "align == 0\n");
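The htobe32/htobe64/be32toh/be64toh names used above come from the glibc <endian.h> API, which removes the need for the hand-rolled ntohll/htonll macros deleted from cow.h. A stand-alone userspace illustration (assumes a glibc-style <endian.h>):

	#include <endian.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t size = 0x1122334455667788ULL;
		uint64_t wire = htobe64(size);	/* host -> on-disk big endian */

		/* be64toh() is a no-op on big endian hosts and a byte
		 * swap on little endian ones, exactly what the COW
		 * header fields need. */
		printf("%jx\n", (uintmax_t)be64toh(wire));
		return 0;
	}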
arch/um/drivers/mconsole_kern.c

@@ -22,6 +22,7 @@
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
 #include <asm/uaccess.h>
+#include <asm/switch_to.h>
 
 #include "init.h"
 #include "irq_kern.h"
arch/um/include/asm/Kbuild

@@ -1,3 +1,4 @@
 generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h
 generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
-generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h
+generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
+generic-y += switch_to.h
arch/um/kernel/Makefile

@@ -3,9 +3,10 @@
 # Licensed under the GPL
 #
 
-CPPFLAGS_vmlinux.lds := -DSTART=$(LDS_START)		\
-                        -DELF_ARCH=$(LDS_ELF_ARCH)	\
-                        -DELF_FORMAT=$(LDS_ELF_FORMAT)
+CPPFLAGS_vmlinux.lds := -DSTART=$(LDS_START)		\
+                        -DELF_ARCH=$(LDS_ELF_ARCH)	\
+                        -DELF_FORMAT=$(LDS_ELF_FORMAT)	\
+                        $(LDS_EXTRA)
 extra-y := vmlinux.lds
 clean-files :=
 
arch/um/kernel/process.c

@@ -88,11 +88,8 @@ static inline void set_current(struct task_struct *task)
 
 extern void arch_switch_to(struct task_struct *to);
 
-void *_switch_to(void *prev, void *next, void *last)
+void *__switch_to(struct task_struct *from, struct task_struct *to)
 {
-	struct task_struct *from = prev;
-	struct task_struct *to = next;
-
 	to->thread.prev_sched = from;
 	set_current(to);
 
@@ -111,7 +108,6 @@ void *_switch_to(void *prev, void *next, void *last)
 	} while (current->thread.saved_task);
 
 	return current->thread.prev_sched;
-
 }
 
 void interrupt_end(void)
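Renaming _switch_to() to __switch_to() and typing its parameters as struct task_struct * lets UML adopt the stock asm-generic version of the header (pulled in by the generic-y += switch_to.h line above) instead of the private switch_to() macro in the deleted arch/x86/um/asm/system.h further down. The generic contract is roughly the following (quoted from memory, treat as a sketch):

	extern struct task_struct *__switch_to(struct task_struct *,
					       struct task_struct *);

	#define switch_to(prev, next, last)				\
		do {							\
			((last) = __switch_to((prev), (next)));		\
		} while (0)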
arch/um/kernel/skas/mmu.c

@@ -103,7 +103,6 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
 
 void uml_setup_stubs(struct mm_struct *mm)
 {
-	struct page **pages;
 	int err, ret;
 
 	if (!skas_needs_stub)
arch/x86/Makefile.um

@@ -14,6 +14,9 @@ LINK-y += $(call cc-option,-m32)
 
 export LDFLAGS
 
+LDS_EXTRA := -Ui386
+export LDS_EXTRA
+
 # First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y.
 include $(srctree)/arch/x86/Makefile_32.cpu
 
arch/x86/um/asm/barrier.h (new file, 75 lines)

@@ -0,0 +1,75 @@
+#ifndef _ASM_UM_BARRIER_H_
+#define _ASM_UM_BARRIER_H_
+
+#include <asm/asm.h>
+#include <asm/segment.h>
+#include <asm/cpufeature.h>
+#include <asm/cmpxchg.h>
+#include <asm/nops.h>
+
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+#ifdef CONFIG_X86_32
+
+#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
+#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
+#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
+
+#else /* CONFIG_X86_32 */
+
+#define mb()	asm volatile("mfence" : : : "memory")
+#define rmb()	asm volatile("lfence" : : : "memory")
+#define wmb()	asm volatile("sfence" : : : "memory")
+
+#endif /* CONFIG_X86_32 */
+
+#define read_barrier_depends()	do { } while (0)
+
+#ifdef CONFIG_SMP
+
+#define smp_mb()	mb()
+#ifdef CONFIG_X86_PPRO_FENCE
+#define smp_rmb()	rmb()
+#else /* CONFIG_X86_PPRO_FENCE */
+#define smp_rmb()	barrier()
+#endif /* CONFIG_X86_PPRO_FENCE */
+
+#ifdef CONFIG_X86_OOSTORE
+#define smp_wmb()	wmb()
+#else /* CONFIG_X86_OOSTORE */
+#define smp_wmb()	barrier()
+#endif /* CONFIG_X86_OOSTORE */
+
+#define smp_read_barrier_depends()	read_barrier_depends()
+#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+
+#else /* CONFIG_SMP */
+
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
+#define smp_read_barrier_depends()	do { } while (0)
+#define set_mb(var, value) do { var = value; barrier(); } while (0)
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Stop RDTSC speculation. This is needed when you need to use RDTSC
+ * (or get_cycles or vread that possibly accesses the TSC) in a defined
+ * code region.
+ *
+ * (Could use an alternative three way for this if there was one.)
+ */
+static inline void rdtsc_barrier(void)
+{
+	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
+	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
+}
+
+#endif
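The barrier definitions above are carried over essentially unchanged from the system.h being deleted below; only the header guard and layout are new. As a reminder of what the smp_* variants pair up to guarantee, the textbook publish/consume pattern looks like this (a schematic with hypothetical data/ready variables):

	/* producer */
	data = 42;
	smp_wmb();		/* order the data store before the flag store */
	ready = 1;

	/* consumer */
	while (!ready)
		cpu_relax();
	smp_rmb();		/* order the flag load before the data load */
	BUG_ON(data != 42);

On !CONFIG_SMP builds both sides collapse to compiler barriers, which is why every smp_* macro above falls back to barrier().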
arch/x86/um/asm/system.h (deleted, 135 lines)

@@ -1,135 +0,0 @@
-#ifndef _ASM_X86_SYSTEM_H_
-#define _ASM_X86_SYSTEM_H_
-
-#include <asm/asm.h>
-#include <asm/segment.h>
-#include <asm/cpufeature.h>
-#include <asm/cmpxchg.h>
-#include <asm/nops.h>
-
-#include <linux/kernel.h>
-#include <linux/irqflags.h>
-
-/* entries in ARCH_DLINFO: */
-#ifdef CONFIG_IA32_EMULATION
-# define AT_VECTOR_SIZE_ARCH 2
-#else
-# define AT_VECTOR_SIZE_ARCH 1
-#endif
-
-extern unsigned long arch_align_stack(unsigned long sp);
-
-void default_idle(void);
-
-/*
- * Force strict CPU ordering.
- * And yes, this is required on UP too when we're talking
- * to devices.
- */
-#ifdef CONFIG_X86_32
-/*
- * Some non-Intel clones support out of order store. wmb() ceases to be a
- * nop for these.
- */
-#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
-#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
-#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
-#else
-#define mb()	asm volatile("mfence":::"memory")
-#define rmb()	asm volatile("lfence":::"memory")
-#define wmb()	asm volatile("sfence" ::: "memory")
-#endif
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier.  All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads.  This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies.  See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	b = 2;
- *	memory_barrier();
- *	p = &b;				q = p;
- *					read_barrier_depends();
- *					d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends().  However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *	CPU 0				CPU 1
- *
- *	a = 2;
- *	memory_barrier();
- *	b = 3;				y = b;
- *					read_barrier_depends();
- *					x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends()	do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()	mb()
-#ifdef CONFIG_X86_PPRO_FENCE
-# define smp_rmb()	rmb()
-#else
-# define smp_rmb()	barrier()
-#endif
-#ifdef CONFIG_X86_OOSTORE
-# define smp_wmb()	wmb()
-#else
-# define smp_wmb()	barrier()
-#endif
-#define smp_read_barrier_depends()	read_barrier_depends()
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
-#else
-#define smp_mb()	barrier()
-#define smp_rmb()	barrier()
-#define smp_wmb()	barrier()
-#define smp_read_barrier_depends()	do { } while (0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
-
-/*
- * Stop RDTSC speculation. This is needed when you need to use RDTSC
- * (or get_cycles or vread that possibly accesses the TSC) in a defined
- * code region.
- *
- * (Could use an alternative three way for this if there was one.)
- */
-static inline void rdtsc_barrier(void)
-{
-	alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
-	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
-}
-
-extern void *_switch_to(void *prev, void *next, void *last);
-#define switch_to(prev, next, last) prev = _switch_to(prev, next, last)
-
-#endif
crypto/Kconfig

@@ -627,7 +627,7 @@ config CRYPTO_BLOWFISH_COMMON
 
 config CRYPTO_BLOWFISH_X86_64
 	tristate "Blowfish cipher algorithm (x86_64)"
-	depends on (X86 || UML_X86) && 64BIT
+	depends on X86 && 64BIT
 	select CRYPTO_ALGAPI
 	select CRYPTO_BLOWFISH_COMMON
 	help
@@ -657,7 +657,7 @@ config CRYPTO_CAMELLIA
 
 config CRYPTO_CAMELLIA_X86_64
 	tristate "Camellia cipher algorithm (x86_64)"
-	depends on (X86 || UML_X86) && 64BIT
+	depends on X86 && 64BIT
 	depends on CRYPTO
 	select CRYPTO_ALGAPI
 	select CRYPTO_LRW
@@ -893,7 +893,7 @@ config CRYPTO_TWOFISH_X86_64
 
 config CRYPTO_TWOFISH_X86_64_3WAY
 	tristate "Twofish cipher algorithm (x86_64, 3-way parallel)"
-	depends on (X86 || UML_X86) && 64BIT
+	depends on X86 && 64BIT
 	select CRYPTO_ALGAPI
 	select CRYPTO_TWOFISH_COMMON
 	select CRYPTO_TWOFISH_X86_64
drivers/i2c/busses/i2c-designware-pcidrv.c

@@ -182,7 +182,6 @@ static int i2c_dw_pci_resume(struct device *dev)
 	pci_restore_state(pdev);
 
 	i2c_dw_init(i2c);
-	i2c_dw_enable(i2c);
 	return 0;
 }
 
drivers/regulator/anatop-regulator.c

@@ -214,7 +214,7 @@ static struct of_device_id __devinitdata of_anatop_regulator_match_tbl[] = {
 	{ /* end */ }
 };
 
-static struct platform_driver anatop_regulator = {
+static struct platform_driver anatop_regulator_driver = {
 	.driver = {
 		.name = "anatop_regulator",
 		.owner = THIS_MODULE,
@@ -226,13 +226,13 @@ static struct platform_driver anatop_regulator = {
 
 static int __init anatop_regulator_init(void)
 {
-	return platform_driver_register(&anatop_regulator);
+	return platform_driver_register(&anatop_regulator_driver);
 }
 postcore_initcall(anatop_regulator_init);
 
 static void __exit anatop_regulator_exit(void)
 {
-	platform_driver_unregister(&anatop_regulator);
+	platform_driver_unregister(&anatop_regulator_driver);
 }
 module_exit(anatop_regulator_exit);
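The rename only stops the struct platform_driver variable from sharing an identifier with the "anatop_regulator" device name string; behavior is unchanged. The driver keeps its explicit postcore_initcall()/module_exit() pair because it must register earlier than the default device initcall level; otherwise the usual boilerplate reduction would apply (hypothetical alternative, not part of this change):

	/* registers at module/device initcall time, replacing both
	 * anatop_regulator_init() and anatop_regulator_exit() */
	module_platform_driver(anatop_regulator_driver);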
drivers/sh/clk/cpg.c

@@ -2,6 +2,7 @@
  * Helper routines for SuperH Clock Pulse Generator blocks (CPG).
  *
  * Copyright (C) 2010  Magnus Damm
+ * Copyright (C) 2010 - 2012  Paul Mundt
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file "COPYING" in the main directory of this archive
@@ -13,26 +14,44 @@
 #include <linux/io.h>
 #include <linux/sh_clk.h>
 
-static int sh_clk_mstp32_enable(struct clk *clk)
+static unsigned int sh_clk_read(struct clk *clk)
 {
-	iowrite32(ioread32(clk->mapped_reg) & ~(1 << clk->enable_bit),
-		  clk->mapped_reg);
+	if (clk->flags & CLK_ENABLE_REG_8BIT)
+		return ioread8(clk->mapped_reg);
+	else if (clk->flags & CLK_ENABLE_REG_16BIT)
+		return ioread16(clk->mapped_reg);
+
+	return ioread32(clk->mapped_reg);
+}
+
+static void sh_clk_write(int value, struct clk *clk)
+{
+	if (clk->flags & CLK_ENABLE_REG_8BIT)
+		iowrite8(value, clk->mapped_reg);
+	else if (clk->flags & CLK_ENABLE_REG_16BIT)
+		iowrite16(value, clk->mapped_reg);
+	else
+		iowrite32(value, clk->mapped_reg);
+}
+
+static int sh_clk_mstp_enable(struct clk *clk)
+{
+	sh_clk_write(sh_clk_read(clk) & ~(1 << clk->enable_bit), clk);
 	return 0;
 }
 
-static void sh_clk_mstp32_disable(struct clk *clk)
+static void sh_clk_mstp_disable(struct clk *clk)
 {
-	iowrite32(ioread32(clk->mapped_reg) | (1 << clk->enable_bit),
-		  clk->mapped_reg);
+	sh_clk_write(sh_clk_read(clk) | (1 << clk->enable_bit), clk);
 }
 
-static struct sh_clk_ops sh_clk_mstp32_clk_ops = {
-	.enable		= sh_clk_mstp32_enable,
-	.disable	= sh_clk_mstp32_disable,
+static struct sh_clk_ops sh_clk_mstp_clk_ops = {
+	.enable		= sh_clk_mstp_enable,
+	.disable	= sh_clk_mstp_disable,
 	.recalc		= followparent_recalc,
 };
 
-int __init sh_clk_mstp32_register(struct clk *clks, int nr)
+int __init sh_clk_mstp_register(struct clk *clks, int nr)
 {
 	struct clk *clkp;
 	int ret = 0;
@@ -40,7 +59,7 @@ int __init sh_clk_mstp32_register(struct clk *clks, int nr)
 
 	for (k = 0; !ret && (k < nr); k++) {
 		clkp = clks + k;
-		clkp->ops = &sh_clk_mstp32_clk_ops;
+		clkp->ops = &sh_clk_mstp_clk_ops;
 		ret |= clk_register(clkp);
 	}
 
@@ -72,7 +91,7 @@ static unsigned long sh_clk_div6_recalc(struct clk *clk)
 	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, NULL);
 
-	idx = ioread32(clk->mapped_reg) & 0x003f;
+	idx = sh_clk_read(clk) & 0x003f;
 
 	return clk->freq_table[idx].frequency;
 }
@@ -98,10 +117,10 @@ static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
 	if (ret < 0)
 		return ret;
 
-	value = ioread32(clk->mapped_reg) &
+	value = sh_clk_read(clk) &
		~(((1 << clk->src_width) - 1) << clk->src_shift);
 
-	iowrite32(value | (i << clk->src_shift), clk->mapped_reg);
+	sh_clk_write(value | (i << clk->src_shift), clk);
 
	/* Rebuild the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
@@ -119,10 +138,10 @@ static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate)
 	if (idx < 0)
 		return idx;
 
-	value = ioread32(clk->mapped_reg);
+	value = sh_clk_read(clk);
 	value &= ~0x3f;
 	value |= idx;
-	iowrite32(value, clk->mapped_reg);
+	sh_clk_write(value, clk);
 	return 0;
 }
 
@@ -133,9 +152,9 @@ static int sh_clk_div6_enable(struct clk *clk)
 
 	ret = sh_clk_div6_set_rate(clk, clk->rate);
 	if (ret == 0) {
-		value = ioread32(clk->mapped_reg);
+		value = sh_clk_read(clk);
 		value &= ~0x100; /* clear stop bit to enable clock */
-		iowrite32(value, clk->mapped_reg);
+		sh_clk_write(value, clk);
 	}
 	return ret;
 }
@@ -144,10 +163,10 @@ static void sh_clk_div6_disable(struct clk *clk)
 {
 	unsigned long value;
 
-	value = ioread32(clk->mapped_reg);
+	value = sh_clk_read(clk);
 	value |= 0x100; /* stop clock */
 	value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
-	iowrite32(value, clk->mapped_reg);
+	sh_clk_write(value, clk);
 }
 
 static struct sh_clk_ops sh_clk_div6_clk_ops = {
@@ -182,7 +201,7 @@ static int __init sh_clk_init_parent(struct clk *clk)
 		return -EINVAL;
 	}
 
-	val  = (ioread32(clk->mapped_reg) >> clk->src_shift);
+	val  = (sh_clk_read(clk) >> clk->src_shift);
 	val &= (1 << clk->src_width) - 1;
 
 	if (val >= clk->parent_num) {
@@ -252,7 +271,7 @@ static unsigned long sh_clk_div4_recalc(struct clk *clk)
 	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
			     table, &clk->arch_flags);
 
-	idx = (ioread32(clk->mapped_reg) >> clk->enable_bit) & 0x000f;
+	idx = (sh_clk_read(clk) >> clk->enable_bit) & 0x000f;
 
 	return clk->freq_table[idx].frequency;
 }
@@ -270,15 +289,15 @@ static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
 	 */
 
 	if (parent->flags & CLK_ENABLE_ON_INIT)
-		value = ioread32(clk->mapped_reg) & ~(1 << 7);
+		value = sh_clk_read(clk) & ~(1 << 7);
 	else
-		value = ioread32(clk->mapped_reg) | (1 << 7);
+		value = sh_clk_read(clk) | (1 << 7);
 
 	ret = clk_reparent(clk, parent);
 	if (ret < 0)
 		return ret;
 
-	iowrite32(value, clk->mapped_reg);
+	sh_clk_write(value, clk);
 
	/* Rebiuld the frequency table */
	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
@@ -295,10 +314,10 @@ static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
 	if (idx < 0)
 		return idx;
 
-	value = ioread32(clk->mapped_reg);
+	value = sh_clk_read(clk);
 	value &= ~(0xf << clk->enable_bit);
 	value |= (idx << clk->enable_bit);
-	iowrite32(value, clk->mapped_reg);
+	sh_clk_write(value, clk);
 
 	if (d4t->kick)
 		d4t->kick(clk);
@@ -308,13 +327,13 @@ static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
 
 static int sh_clk_div4_enable(struct clk *clk)
 {
-	iowrite32(ioread32(clk->mapped_reg) & ~(1 << 8), clk->mapped_reg);
+	sh_clk_write(sh_clk_read(clk) & ~(1 << 8), clk);
 	return 0;
 }
 
 static void sh_clk_div4_disable(struct clk *clk)
 {
-	iowrite32(ioread32(clk->mapped_reg) | (1 << 8), clk->mapped_reg);
+	sh_clk_write(sh_clk_read(clk) | (1 << 8), clk);
 }
 
 static struct sh_clk_ops sh_clk_div4_clk_ops = {
drivers/staging/android/lowmemorykiller.c

@@ -55,7 +55,6 @@ static int lowmem_minfree[6] = {
 };
 static int lowmem_minfree_size = 4;
 
-static struct task_struct *lowmem_deathpending;
 static unsigned long lowmem_deathpending_timeout;
 
 #define lowmem_print(level, x...)			\
@@ -64,24 +63,6 @@ static unsigned long lowmem_deathpending_timeout;
 			printk(x);			\
 	} while (0)
 
-static int
-task_notify_func(struct notifier_block *self, unsigned long val, void *data);
-
-static struct notifier_block task_nb = {
-	.notifier_call	= task_notify_func,
-};
-
-static int
-task_notify_func(struct notifier_block *self, unsigned long val, void *data)
-{
-	struct task_struct *task = data;
-
-	if (task == lowmem_deathpending)
-		lowmem_deathpending = NULL;
-
-	return NOTIFY_OK;
-}
-
 static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
 {
 	struct task_struct *tsk;
@@ -97,19 +78,6 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
 	int other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_SHMEM);
 
-	/*
-	 * If we already have a death outstanding, then
-	 * bail out right away; indicating to vmscan
-	 * that we have nothing further to offer on
-	 * this pass.
-	 *
-	 * Note: Currently you need CONFIG_PROFILING
-	 * for this to work correctly.
-	 */
-	if (lowmem_deathpending &&
-	    time_before_eq(jiffies, lowmem_deathpending_timeout))
-		return 0;
-
 	if (lowmem_adj_size < array_size)
 		array_size = lowmem_adj_size;
 	if (lowmem_minfree_size < array_size)
@@ -148,6 +116,12 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
 		if (!p)
 			continue;
 
+		if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
+		    time_before_eq(jiffies, lowmem_deathpending_timeout)) {
+			task_unlock(p);
+			rcu_read_unlock();
+			return 0;
+		}
 		oom_score_adj = p->signal->oom_score_adj;
 		if (oom_score_adj < min_score_adj) {
 			task_unlock(p);
@@ -174,15 +148,9 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
 		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_score_adj, selected_tasksize);
-		/*
-		 * If CONFIG_PROFILING is off, then we don't want to stall
-		 * the killer by setting lowmem_deathpending.
-		 */
-#ifdef CONFIG_PROFILING
-		lowmem_deathpending = selected;
 		lowmem_deathpending_timeout = jiffies + HZ;
-#endif
 		send_sig(SIGKILL, selected, 0);
 		set_tsk_thread_flag(selected, TIF_MEMDIE);
 		rem -= selected_tasksize;
 	}
 	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
@@ -198,7 +166,6 @@ static struct shrinker lowmem_shrinker = {
 
 static int __init lowmem_init(void)
 {
-	task_handoff_register(&task_nb);
 	register_shrinker(&lowmem_shrinker);
 	return 0;
 }
@@ -206,7 +173,6 @@ static int __init lowmem_init(void)
 static void __exit lowmem_exit(void)
 {
 	unregister_shrinker(&lowmem_shrinker);
-	task_handoff_unregister(&task_nb);
 }
 
 module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
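With the task-handoff notifier gone, the shrinker now spots an exiting victim by its TIF_MEMDIE flag plus a one-second deadline, so it no longer needs CONFIG_PROFILING. The deadline uses the wraparound-safe jiffies helpers; a minimal sketch of the idiom (hypothetical stand-alone fragment):

	unsigned long timeout = jiffies + HZ;	/* one second from now */

	if (time_before_eq(jiffies, timeout)) {
		/* still inside the window: a previously chosen task is
		 * presumably on its way out, report nothing to vmscan */
	}

time_before_eq() compares via signed arithmetic on the difference, so the test stays correct across a jiffies wrap.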
include/linux/sh_clk.h

@@ -59,7 +59,15 @@ struct clk {
 	unsigned int		nr_freqs;
 };
 
-#define CLK_ENABLE_ON_INIT	(1 << 0)
+#define CLK_ENABLE_ON_INIT	BIT(0)
+
+#define CLK_ENABLE_REG_32BIT	BIT(1)	/* default access size */
+#define CLK_ENABLE_REG_16BIT	BIT(2)
+#define CLK_ENABLE_REG_8BIT	BIT(3)
+
+#define CLK_ENABLE_REG_MASK	(CLK_ENABLE_REG_32BIT | \
+				 CLK_ENABLE_REG_16BIT | \
+				 CLK_ENABLE_REG_8BIT)
 
 /* drivers/sh/clk.c */
 unsigned long followparent_recalc(struct clk *);
@@ -102,7 +110,7 @@ long clk_round_parent(struct clk *clk, unsigned long target,
		      unsigned long *best_freq, unsigned long *parent_freq,
		      unsigned int div_min, unsigned int div_max);
 
-#define SH_CLK_MSTP32(_parent, _enable_reg, _enable_bit, _flags)	\
+#define SH_CLK_MSTP(_parent, _enable_reg, _enable_bit, _flags)		\
 {									\
 	.parent		= _parent,					\
 	.enable_reg	= (void __iomem *)_enable_reg,			\
@@ -110,7 +118,27 @@ long clk_round_parent(struct clk *clk, unsigned long target,
 	.flags		= _flags,					\
 }
 
-int sh_clk_mstp32_register(struct clk *clks, int nr);
+#define SH_CLK_MSTP32(_p, _r, _b, _f)					\
+	SH_CLK_MSTP(_p, _r, _b, _f | CLK_ENABLE_REG_32BIT)
+
+#define SH_CLK_MSTP16(_p, _r, _b, _f)					\
+	SH_CLK_MSTP(_p, _r, _b, _f | CLK_ENABLE_REG_16BIT)
+
+#define SH_CLK_MSTP8(_p, _r, _b, _f)					\
+	SH_CLK_MSTP(_p, _r, _b, _f | CLK_ENABLE_REG_8BIT)
+
+int sh_clk_mstp_register(struct clk *clks, int nr);
+
+/*
+ * MSTP registration never really cared about access size, despite the
+ * original enable/disable pairs assuming a 32-bit access. Clocks are
+ * responsible for defining their access sizes either directly or via the
+ * clock definition wrappers.
+ */
+static inline int __deprecated sh_clk_mstp32_register(struct clk *clks, int nr)
+{
+	return sh_clk_mstp_register(clks, nr);
+}
 
 #define SH_CLK_DIV4(_parent, _reg, _shift, _div_bitmap, _flags)	\
 {									\
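With the access-size flags in place, board code describes each MSTP clock through the width-specific wrappers and registers the table with the renamed helper; SH_CLK_MSTP32() keeps existing 32-bit users source-compatible. A hypothetical clock table as a sketch (MSTPCR0/MSTPCR2 and parent_clk are made-up names):

	static struct clk mstp_clks[] = {
		/* 32-bit enable register, enable bit 15 */
		SH_CLK_MSTP32(&parent_clk, MSTPCR0, 15, 0),
		/* 8-bit enable register, enable bit 3 */
		SH_CLK_MSTP8(&parent_clk, MSTPCR2, 3, 0),
	};

	ret = sh_clk_mstp_register(mstp_clks, ARRAY_SIZE(mstp_clks));

sh_clk_read()/sh_clk_write() in cpg.c above then dispatch on CLK_ENABLE_REG_8BIT/CLK_ENABLE_REG_16BIT and default to 32-bit I/O.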
scripts/mod/modpost.c

@@ -132,8 +132,10 @@ static struct module *new_module(char *modname)
 	/* strip trailing .o */
 	s = strrchr(p, '.');
 	if (s != NULL)
-		if (strcmp(s, ".o") == 0)
+		if (strcmp(s, ".o") == 0) {
 			*s = '\0';
+			mod->is_dot_o = 1;
+		}
 
 	/* add to list */
 	mod->name = p;
@@ -587,8 +589,8 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
 	unsigned int crc;
 	enum export export;
 
-	if (!is_vmlinux(mod->name) && strncmp(symname, "__ksymtab", 9) == 0)
+	if ((!is_vmlinux(mod->name) || mod->is_dot_o) &&
+	    strncmp(symname, "__ksymtab", 9) == 0)
 		export = export_from_secname(info, get_secindex(info, sym));
 	else
 		export = export_from_sec(info, get_secindex(info, sym));
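The new is_dot_o flag makes handle_modversions() treat a bare .o object like vmlinux, so exported-symbol sections are still resolved by section name. A stand-alone rendering of the name-stripping logic (hypothetical helper for illustration, compiles as plain C):

	#include <stdio.h>
	#include <string.h>

	struct module { char name[64]; int is_dot_o; };

	/* mirrors new_module() above: strip a trailing ".o" and
	 * remember that this object came from a .o file */
	static void set_module_name(struct module *mod, const char *file)
	{
		char *s;

		snprintf(mod->name, sizeof(mod->name), "%s", file);
		mod->is_dot_o = 0;
		s = strrchr(mod->name, '.');
		if (s != NULL && strcmp(s, ".o") == 0) {
			*s = '\0';
			mod->is_dot_o = 1;
		}
	}

	int main(void)
	{
		struct module m;
		set_module_name(&m, "vmlinux.o");
		printf("%s %d\n", m.name, m.is_dot_o);	/* vmlinux 1 */
		return 0;
	}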
scripts/mod/modpost.h

@@ -113,6 +113,7 @@ struct module {
 	int has_cleanup;
 	struct buffer dev_table_buf;
 	char	     srcversion[25];
+	int is_dot_o;
 };
 
 struct elf_info {