
Merge branch 'akpm' (patches from Andrew Morton)

Merge first patch-bomb from Andrew Morton:
 "Quite a lot of other stuff is banked up awaiting further
  next->mainline merging, but this batch contains:

   - Lots of random misc patches
   - OCFS2
   - Most of MM
   - backlight updates
   - lib/ updates
   - printk updates
   - checkpatch updates
   - epoll tweaking
   - rtc updates
   - hfs
   - hfsplus
   - documentation
   - procfs
   - update gcov to gcc-4.7 format
   - IPC"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (269 commits)
  ipc, msg: fix message length check for negative values
  ipc/util.c: remove unnecessary work pending test
  devpts: plug the memory leak in kill_sb
  ./Makefile: export initial ramdisk compression config option
  init/Kconfig: add option to disable kernel compression
  drivers: w1: make w1_slave::flags long to avoid memory corruption
  drivers/w1/masters/ds1wm.c: use dev_get_platdata()
  drivers/memstick/core/ms_block.c: fix unreachable state in h_msb_read_page()
  drivers/memstick/core/mspro_block.c: fix attributes array allocation
  drivers/pps/clients/pps-gpio.c: remove redundant of_match_ptr
  kernel/panic.c: reduce 1 byte usage for print tainted buffer
  gcov: reuse kbasename helper
  kernel/gcov/fs.c: use pr_warn()
  kernel/module.c: use pr_foo()
  gcov: compile specific gcov implementation based on gcc version
  gcov: add support for gcc 4.7 gcov format
  gcov: move gcov structs definitions to a gcc version specific file
  kernel/taskstats.c: return -ENOMEM when alloc memory fails in add_del_listener()
  kernel/taskstats.c: add nla_nest_cancel() for failure processing between nla_nest_start() and nla_nest_end()
  kernel/sysctl_binary.c: use scnprintf() instead of snprintf()
  ...
commit 5cbb3d216e (master)
Author: Linus Torvalds
 CREDITS | 7
 Documentation/ABI/README | 13
 Documentation/backlight/lp855x-driver.txt | 5
 Documentation/cgroups/memory.txt | 10
 Documentation/cpu-hotplug.txt | 2
 Documentation/devicetree/bindings/video/backlight/lp855x.txt | 29
 Documentation/filesystems/proc.txt | 1
 Documentation/filesystems/vfat.txt | 2
 Documentation/gcov.txt | 4
 Documentation/kernel-parameters.txt | 6
 Documentation/sysctl/kernel.txt | 25
 Documentation/sysctl/vm.txt | 15
 Documentation/trace/tracepoints.txt | 5
 Documentation/vm/zswap.txt | 8
 MAINTAINERS | 5
 Makefile | 16
 arch/alpha/include/uapi/asm/errno.h | 2
 arch/arm/kernel/module.c | 2
 arch/arm/mach-davinci/sram.c | 9
 arch/arm64/kernel/module.c | 2
 arch/cris/include/asm/io.h | 1
 arch/ia64/include/asm/processor.h | 2
 arch/ia64/mm/init.c | 4
 arch/metag/kernel/dma.c | 4
 arch/metag/mm/init.c | 2
 arch/microblaze/mm/consistent.c | 7
 arch/mips/include/uapi/asm/errno.h | 2
 arch/parisc/include/uapi/asm/errno.h | 2
 arch/parisc/kernel/module.c | 2
 arch/powerpc/mm/dma-noncoherent.c | 4
 arch/powerpc/mm/hugetlbpage.c | 2
 arch/powerpc/mm/numa.c | 3
 arch/s390/kernel/module.c | 2
 arch/s390/mm/mmap.c | 9
 arch/sh/include/asm/fpu.h | 2
 arch/sh/include/asm/processor_32.h | 10
 arch/sh/include/asm/processor_64.h | 10
 arch/sh/kernel/cpu/fpu.c | 2
 arch/sh/kernel/process_32.c | 6
 arch/sh/kernel/process_64.c | 4
 arch/sh/mm/init.c | 2
 arch/sparc/include/uapi/asm/errno.h | 2
 arch/sparc/kernel/module.c | 2
 arch/x86/include/asm/fpu-internal.h | 10
 arch/x86/include/asm/processor.h | 9
 arch/x86/kernel/i387.c | 2
 arch/x86/kernel/module.c | 2
 arch/x86/kernel/process_32.c | 4
 arch/x86/kernel/process_64.c | 2
 arch/x86/kernel/setup.c | 9
 arch/x86/kernel/traps.c | 2
 arch/x86/mm/init.c | 125
 arch/x86/mm/numa.c | 11
 drivers/char/Kconfig | 10
 drivers/char/hpet.c | 24
 drivers/dma/mmp_tdma.c | 7
 drivers/iommu/omap-iopgtable.h | 2
 drivers/media/platform/coda.c | 5
 drivers/memstick/core/ms_block.c | 2
 drivers/memstick/core/mspro_block.c | 4
 drivers/message/i2o/driver.c | 16
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 3
 drivers/pps/clients/pps-gpio.c | 2
 drivers/rtc/Kconfig | 20
 drivers/rtc/Makefile | 2
 drivers/rtc/rtc-88pm80x.c | 8
 drivers/rtc/rtc-88pm860x.c | 2
 drivers/rtc/rtc-as3722.c | 275
 drivers/rtc/rtc-at91rm9200.c | 28
 drivers/rtc/rtc-cmos.c | 3
 drivers/rtc/rtc-da9055.c | 2
 drivers/rtc/rtc-ds1305.c | 2
 drivers/rtc/rtc-ds1307.c | 10
 drivers/rtc/rtc-ds2404.c | 2
 drivers/rtc/rtc-ep93xx.c | 6
 drivers/rtc/rtc-isl1208.c | 42
 drivers/rtc/rtc-m48t59.c | 20
 drivers/rtc/rtc-m48t86.c | 8
 drivers/rtc/rtc-max6900.c | 9
 drivers/rtc/rtc-mrst.c | 2
 drivers/rtc/rtc-omap.c | 2
 drivers/rtc/rtc-pcf2123.c | 2
 drivers/rtc/rtc-pl030.c | 9
 drivers/rtc/rtc-pl031.c | 2
 drivers/rtc/rtc-puv3.c | 22
 drivers/rtc/rtc-rs5c348.c | 4
 drivers/rtc/rtc-s5m.c | 635
 drivers/rtc/rtc-sh.c | 5
 drivers/rtc/rtc-sirfsoc.c | 27
 drivers/rtc/rtc-snvs.c | 2
 drivers/rtc/rtc-stmp3xxx.c | 2
 drivers/rtc/rtc-tps65910.c | 1
 drivers/rtc/rtc-v3020.c | 2
 drivers/rtc/rtc-vr41xx.c | 4
 drivers/rtc/rtc-vt8500.c | 4
 drivers/staging/lustre/lustre/include/lustre/lustre_errno.h | 2
 drivers/uio/uio_pruss.c | 6
 drivers/video/acornfb.c | 4
 drivers/video/backlight/88pm860x_bl.c | 18
 drivers/video/backlight/Kconfig | 10

CREDITS | 7

@@ -2576,7 +2576,7 @@ S: Toronto, Ontario
 S: Canada

 N: Zwane Mwaikambo
-E: zwane@arm.linux.org.uk
+E: zwanem@gmail.com
 D: Various driver hacking
 D: Lowlevel x86 kernel hacking
 D: General debugging

@@ -2895,6 +2895,11 @@ S: Framewood Road
 S: Wexham SL3 6PJ
 S: United Kingdom

+N: Richard Purdie
+E: rpurdie@rpsys.net
+D: Backlight subsystem maintainer
+S: United Kingdom
+
 N: Daniel Quinlan
 E: quinlan@pathname.com
 W: http://www.pathname.com/~quinlan/

Documentation/ABI/README | 13

@@ -72,3 +72,16 @@ kernel tree without going through the obsolete state first.

 It's up to the developer to place their interfaces in the category they
 wish for it to start out in.
+
+Notable bits of non-ABI, which should not under any circumstances be considered
+stable:
+
+- Kconfig. Userspace should not rely on the presence or absence of any
+particular Kconfig symbol, in /proc/config.gz, in the copy of .config
+commonly installed to /boot, or in any invocation of the kernel build
+process.
+
+- Kernel-internal symbols. Do not rely on the presence, absence, location, or
+type of any kernel symbol, either in System.map files or the kernel binary
+itself. See Documentation/stable_api_nonsense.txt.

Documentation/backlight/lp855x-driver.txt | 5

@@ -4,7 +4,8 @@ Kernel driver lp855x

 Backlight driver for LP855x ICs

 Supported chips:
-Texas Instruments LP8550, LP8551, LP8552, LP8553, LP8556 and LP8557
+Texas Instruments LP8550, LP8551, LP8552, LP8553, LP8555, LP8556 and
+LP8557

 Author: Milo(Woogyom) Kim <milo.kim@ti.com>

@@ -24,7 +25,7 @@ Value : pwm based or register based

 2) chip_id
 The lp855x chip id.
-Value : lp8550/lp8551/lp8552/lp8553/lp8556/lp8557
+Value : lp8550/lp8551/lp8552/lp8553/lp8555/lp8556/lp8557

 Platform data for lp855x
 ------------------------

Documentation/cgroups/memory.txt | 10

@@ -573,15 +573,19 @@ an memcg since the pages are allowed to be allocated from any physical
 node. One of the use cases is evaluating application performance by
 combining this information with the application's CPU allocation.

-We export "total", "file", "anon" and "unevictable" pages per-node for
-each memcg. The ouput format of memory.numa_stat is:
+Each memcg's numa_stat file includes "total", "file", "anon" and "unevictable"
+per-node page counts including "hierarchical_<counter>" which sums up all
+hierarchical children's values in addition to the memcg's own value.
+
+The ouput format of memory.numa_stat is:

 total=<total pages> N0=<node 0 pages> N1=<node 1 pages> ...
 file=<total file pages> N0=<node 0 pages> N1=<node 1 pages> ...
 anon=<total anon pages> N0=<node 0 pages> N1=<node 1 pages> ...
 unevictable=<total anon pages> N0=<node 0 pages> N1=<node 1 pages> ...
+hierarchical_<counter>=<counter pages> N0=<node 0 pages> N1=<node 1 pages> ...

-And we have total = file + anon + unevictable.
+The "total" count is sum of file + anon + unevictable.

 6. Hierarchy support

Documentation/cpu-hotplug.txt | 2

@@ -5,7 +5,7 @@

 Rusty Russell <rusty@rustcorp.com.au>
 Srivatsa Vaddagiri <vatsa@in.ibm.com>
 i386:
-Zwane Mwaikambo <zwane@arm.linux.org.uk>
+Zwane Mwaikambo <zwanem@gmail.com>
 ppc64:
 Nathan Lynch <nathanl@austin.ibm.com>
 Joel Schopp <jschopp@austin.ibm.com>

Documentation/devicetree/bindings/video/backlight/lp855x.txt | 29

@@ -2,7 +2,7 @@ lp855x bindings

 Required properties:
 - compatible: "ti,lp8550", "ti,lp8551", "ti,lp8552", "ti,lp8553",
-              "ti,lp8556", "ti,lp8557"
+              "ti,lp8555", "ti,lp8556", "ti,lp8557"
 - reg: I2C slave address (u8)
 - dev-ctrl: Value of DEVICE CONTROL register (u8). It depends on the device.

@@ -15,6 +15,33 @@ Optional properties:

 Example:

+/* LP8555 */
+backlight@2c {
+	compatible = "ti,lp8555";
+	reg = <0x2c>;
+
+	dev-ctrl = /bits/ 8 <0x00>;
+	pwm-period = <10000>;
+
+	/* 4V OV, 4 output LED0 string enabled */
+	rom_14h {
+		rom-addr = /bits/ 8 <0x14>;
+		rom-val = /bits/ 8 <0xcf>;
+	};
+
+	/* Heavy smoothing, 24ms ramp time step */
+	rom_15h {
+		rom-addr = /bits/ 8 <0x15>;
+		rom-val = /bits/ 8 <0xc7>;
+	};
+
+	/* 4 output LED1 string enabled */
+	rom_19h {
+		rom-addr = /bits/ 8 <0x19>;
+		rom-val = /bits/ 8 <0x0f>;
+	};
+};
+
 /* LP8556 */
 backlight@2c {
 	compatible = "ti,lp8556";

Documentation/filesystems/proc.txt | 1

@@ -460,6 +460,7 @@ manner. The codes are the following:
 nl - non-linear mapping
 ar - architecture specific flag
 dd - do not include area into core dump
+sd - soft-dirty flag
 mm - mixed map area
 hg - huge page advise flag
 nh - no-huge page advise flag

Documentation/filesystems/vfat.txt | 2

@@ -307,7 +307,7 @@ the following:

 <proceeding files...>
 <slot #3, id = 0x43, characters = "h is long">
-<slot #2, id = 0x02, characters = "xtension which">
+<slot #2, id = 0x02, characters = "xtension whic">
 <slot #1, id = 0x01, characters = "My Big File.E">
 <directory entry, name = "MYBIGFIL.EXT">

Documentation/gcov.txt | 4

@@ -50,6 +50,10 @@ Configure the kernel with:
 CONFIG_DEBUG_FS=y
 CONFIG_GCOV_KERNEL=y

+select the gcc's gcov format, default is autodetect based on gcc version:
+
+CONFIG_GCOV_FORMAT_AUTODETECT=y
+
 and to get coverage data for the entire kernel:

 CONFIG_GCOV_PROFILE_ALL=y

Documentation/kernel-parameters.txt | 6

@@ -1070,6 +1070,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 VIA, nVidia)
 verbose: show contents of HPET registers during setup

+hpet_mmap= [X86, HPET_MMAP] Allow userspace to mmap HPET
+registers. Default set by CONFIG_HPET_MMAP_DEFAULT.
+
 hugepages= [HW,X86-32,IA-64] HugeTLB pages to allocate at boot.
 hugepagesz= [HW,IA-64,PPC,X86-64] The size of the HugeTLB pages.
 On x86-64 and powerpc, this option can be specified

@@ -1775,6 +1778,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 that the amount of memory usable for all allocations
 is not too small.

+movable_node [KNL,X86] Boot-time switch to enable the effects
+of CONFIG_MOVABLE_NODE=y. See mm/Kconfig for details.
+
 MTD_Partition= [MTD]
 Format: <name>,<region-number>,<size>,<offset>

Documentation/sysctl/kernel.txt | 25

@@ -290,13 +290,24 @@ Default value is "/sbin/hotplug".
 kptr_restrict:

 This toggle indicates whether restrictions are placed on
-exposing kernel addresses via /proc and other interfaces. When
-kptr_restrict is set to (0), there are no restrictions. When
-kptr_restrict is set to (1), the default, kernel pointers
-printed using the %pK format specifier will be replaced with 0's
-unless the user has CAP_SYSLOG. When kptr_restrict is set to
-(2), kernel pointers printed using %pK will be replaced with 0's
-regardless of privileges.
+exposing kernel addresses via /proc and other interfaces.
+
+When kptr_restrict is set to (0), the default, there are no restrictions.
+
+When kptr_restrict is set to (1), kernel pointers printed using the %pK
+format specifier will be replaced with 0's unless the user has CAP_SYSLOG
+and effective user and group ids are equal to the real ids. This is
+because %pK checks are done at read() time rather than open() time, so
+if permissions are elevated between the open() and the read() (e.g via
+a setuid binary) then %pK will not leak kernel pointers to unprivileged
+users. Note, this is a temporary solution only. The correct long-term
+solution is to do the permission checks at open() time. Consider removing
+world read permissions from files that use %pK, and using dmesg_restrict
+to protect against uses of %pK in dmesg(8) if leaking kernel pointer
+values to unprivileged users is a concern.
+
+When kptr_restrict is set to (2), kernel pointers printed using
+%pK will be replaced with 0's regardless of privileges.

 ==============================================================

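For reference, %pK is used like any other printk format specifier; a minimal sketch of a caller (illustrative, not from this patch):

/* Under kptr_restrict, unprivileged readers see this pointer as zeros. */
pr_info("task struct at %pK\n", tsk);
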
Documentation/sysctl/vm.txt | 15

@@ -119,8 +119,11 @@ other appears as 0 when read.

 dirty_background_ratio

-Contains, as a percentage of total system memory, the number of pages at which
-the background kernel flusher threads will start writing out dirty data.
+Contains, as a percentage of total available memory that contains free pages
+and reclaimable pages, the number of pages at which the background kernel
+flusher threads will start writing out dirty data.
+
+The total avaiable memory is not equal to total system memory.

 ==============================================================

@@ -151,9 +154,11 @@ interval will be written out next time a flusher thread wakes up.

 dirty_ratio

-Contains, as a percentage of total system memory, the number of pages at which
-a process which is generating disk writes will itself start writing out dirty
-data.
+Contains, as a percentage of total available memory that contains free pages
+and reclaimable pages, the number of pages at which a process which is
+generating disk writes will itself start writing out dirty data.
+
+The total avaiable memory is not equal to total system memory.

 ==============================================================

Documentation/trace/tracepoints.txt | 5

@@ -114,3 +114,8 @@ core kernel image or in modules.
 If the tracepoint has to be used in kernel modules, an
 EXPORT_TRACEPOINT_SYMBOL_GPL() or EXPORT_TRACEPOINT_SYMBOL() can be
 used to export the defined tracepoints.
+
+Note: The convenience macro TRACE_EVENT provides an alternative way to
+define tracepoints. Check http://lwn.net/Articles/379903,
+http://lwn.net/Articles/381064 and http://lwn.net/Articles/383362
+for a series of articles with more details.

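A minimal TRACE_EVENT skeleton in the shape the referenced articles describe (the event name and field are illustrative, not part of this patch):

TRACE_EVENT(sample_event,
	TP_PROTO(int value),
	TP_ARGS(value),
	TP_STRUCT__entry(
		__field(int, value)
	),
	TP_fast_assign(
		__entry->value = value;
	),
	TP_printk("value=%d", __entry->value)
);
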
Documentation/vm/zswap.txt | 8

@@ -8,7 +8,7 @@ significant performance improvement if reads from the compressed cache are
 faster than reads from a swap device.

 NOTE: Zswap is a new feature as of v3.11 and interacts heavily with memory
-reclaim. This interaction has not be fully explored on the large set of
+reclaim. This interaction has not been fully explored on the large set of
 potential configurations and workloads that exist. For this reason, zswap
 is a work in progress and should be considered experimental.

@@ -23,7 +23,7 @@ Some potential benefits:
     drastically reducing life-shortening writes.

 Zswap evicts pages from compressed cache on an LRU basis to the backing swap
-device when the compressed pool reaches it size limit. This requirement had
+device when the compressed pool reaches its size limit. This requirement had
 been identified in prior community discussions.

 To enabled zswap, the "enabled" attribute must be set to 1 at boot time. e.g.

@@ -37,7 +37,7 @@ the backing swap device in the case that the compressed pool is full.

 Zswap makes use of zbud for the managing the compressed memory pool. Each
 allocation in zbud is not directly accessible by address. Rather, a handle is
-return by the allocation routine and that handle must be mapped before being
+returned by the allocation routine and that handle must be mapped before being
 accessed. The compressed memory pool grows on demand and shrinks as compressed
 pages are freed. The pool is not preallocated.

@@ -56,7 +56,7 @@ in the swap_map goes to 0) the swap code calls the zswap invalidate function,
 via frontswap, to free the compressed entry.

 Zswap seeks to be simple in its policies. Sysfs attributes allow for one user
-controlled policies:
+controlled policy:

 * max_pool_percent - The maximum percentage of memory that the compressed
   pool can occupy.

MAINTAINERS | 5

@@ -1661,7 +1661,6 @@ S: Maintained
 F: drivers/net/wireless/b43legacy/

 BACKLIGHT CLASS/SUBSYSTEM
-M: Richard Purdie <rpurdie@rpsys.net>
 M: Jingoo Han <jg1.han@samsung.com>
 S: Maintained
 F: drivers/video/backlight/

@@ -2373,7 +2372,7 @@ F: kernel/cpuset.c
 CRAMFS FILESYSTEM
 W: http://sourceforge.net/projects/cramfs/
-S: Orphan
+S: Orphan / Obsolete
 F: Documentation/filesystems/cramfs.txt
 F: fs/cramfs/

@@ -7320,7 +7319,7 @@ S: Odd Fixes
 F: drivers/media/usb/tlg2300/

 SC1200 WDT DRIVER
-M: Zwane Mwaikambo <zwane@arm.linux.org.uk>
+M: Zwane Mwaikambo <zwanem@gmail.com>
 S: Maintained
 F: drivers/watchdog/sc1200wdt.c

Makefile | 16

@@ -720,6 +720,22 @@ mod_strip_cmd = true
 endif # INSTALL_MOD_STRIP
 export mod_strip_cmd

+# Select initial ramdisk compression format, default is gzip(1).
+# This shall be used by the dracut(8) tool while creating an initramfs image.
+#
+INITRD_COMPRESS=gzip
+ifeq ($(CONFIG_RD_BZIP2), y)
+INITRD_COMPRESS=bzip2
+else ifeq ($(CONFIG_RD_LZMA), y)
+INITRD_COMPRESS=lzma
+else ifeq ($(CONFIG_RD_XZ), y)
+INITRD_COMPRESS=xz
+else ifeq ($(CONFIG_RD_LZO), y)
+INITRD_COMPRESS=lzo
+else ifeq ($(CONFIG_RD_LZ4), y)
+INITRD_COMPRESS=lz4
+endif
+export INITRD_COMPRESS
+
 ifdef CONFIG_MODULE_SIG_ALL
 MODSECKEY = ./signing_key.priv

arch/alpha/include/uapi/asm/errno.h | 2

@@ -43,7 +43,7 @@
 #define EUSERS 68 /* Too many users */
 #define EDQUOT 69 /* Quota exceeded */
-#define ESTALE 70 /* Stale NFS file handle */
+#define ESTALE 70 /* Stale file handle */
 #define EREMOTE 71 /* Object is remote */

 #define ENOLCK 77 /* No record locks available */

arch/arm/kernel/module.c | 2

@@ -40,7 +40,7 @@
 void *module_alloc(unsigned long size)
 {
 	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
+				GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
 #endif

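The -1 to NUMA_NO_NODE conversions in this and the other module.c hunks below are purely cosmetic; NUMA_NO_NODE is the existing define from include/linux/numa.h that documents the magic value:

#define NUMA_NO_NODE	(-1)
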
arch/arm/mach-davinci/sram.c | 9

@@ -25,7 +25,6 @@ struct gen_pool *sram_get_gen_pool(void)

 void *sram_alloc(size_t len, dma_addr_t *dma)
 {
-	unsigned long vaddr;
 	dma_addr_t dma_base = davinci_soc_info.sram_dma;

 	if (dma)

@@ -33,13 +32,7 @@ void *sram_alloc(size_t len, dma_addr_t *dma)
 	if (!sram_pool || (dma && !dma_base))
 		return NULL;

-	vaddr = gen_pool_alloc(sram_pool, len);
-	if (!vaddr)
-		return NULL;
-
-	if (dma)
-		*dma = gen_pool_virt_to_phys(sram_pool, vaddr);
-	return (void *)vaddr;
+	return gen_pool_dma_alloc(sram_pool, len, dma);
 }
 EXPORT_SYMBOL(sram_alloc);

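This hunk is representative of several conversions in this batch (mmp_tdma.c and coda.c below do the same): the open-coded gen_pool_alloc() plus gen_pool_virt_to_phys() pair becomes one call. A sketch of what the new helper does, following the lib/genalloc.c implementation added earlier in this series:

void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	/* carve a region out of the pool, exactly as callers used to */
	vaddr = gen_pool_alloc(pool, size);
	if (!vaddr)
		return NULL;

	/* optionally hand back the bus address of the same region */
	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
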
arch/arm64/kernel/module.c | 2

@@ -29,7 +29,7 @@
 void *module_alloc(unsigned long size)
 {
 	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
+				GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }

arch/cris/include/asm/io.h | 1

@@ -3,6 +3,7 @@
 #include <asm/page.h> /* for __va, __pa */
 #include <arch/io.h>
 #include <asm-generic/iomap.h>
+#include <linux/kernel.h>

 struct cris_io_operations

arch/ia64/include/asm/processor.h | 2

@@ -319,7 +319,7 @@ struct thread_struct {
 	regs->loadrs = 0; \
 	regs->r8 = get_dumpable(current->mm); /* set "don't zap registers" flag */ \
 	regs->r12 = new_sp - 16; /* allocate 16 byte scratch area */ \
-	if (unlikely(!get_dumpable(current->mm))) { \
+	if (unlikely(get_dumpable(current->mm) != SUID_DUMP_USER)) { \
 	/* \
 	 * Zap scratch regs to avoid leaking bits between processes with different \
 	 * uid/privileges. \

arch/ia64/mm/init.c | 4

@@ -357,9 +357,7 @@ int vmemmap_find_next_valid_pfn(int node, int i)
 	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
 	end_address = PAGE_ALIGN(end_address);
-	stop_address = (unsigned long) &vmem_map[
-		pgdat->node_start_pfn + pgdat->node_spanned_pages];
+	stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)];

 	do {
 		pgd_t *pgd;

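This hunk and the similar ones below (metag, powerpc, sh) replace open-coded end-PFN arithmetic with the pgdat_end_pfn() helper, defined in include/linux/mmzone.h as:

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	/* one past the last PFN spanned by this node */
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}
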
arch/metag/kernel/dma.c | 4

@@ -305,9 +305,7 @@ void dma_free_coherent(struct device *dev, size_t size,
 		if (pfn_valid(pfn)) {
 			struct page *page = pfn_to_page(pfn);
-			ClearPageReserved(page);
-			__free_page(page);
+			__free_reserved_page(page);
 			continue;
 		}
 	}

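__free_reserved_page(), used here and in the microblaze and powerpc hunks below, folds the ClearPageReserved/__free_page pair into one helper; its include/linux/mm.h definition is:

static inline void __free_reserved_page(struct page *page)
{
	ClearPageReserved(page);
	init_page_count(page);	/* also resets the refcount, which the
				 * open-coded callers skipped */
	__free_page(page);
}
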
arch/metag/mm/init.c | 2

@@ -148,7 +148,7 @@ static void __init bootmem_init_one_node(unsigned int nid)
 	if (!p->node_spanned_pages)
 		return;

-	end_pfn = p->node_start_pfn + p->node_spanned_pages;
+	end_pfn = pgdat_end_pfn(p);
 #ifdef CONFIG_HIGHMEM
 	if (end_pfn > max_low_pfn)
 		end_pfn = max_low_pfn;

arch/microblaze/mm/consistent.c | 7

@@ -176,8 +176,7 @@ void consistent_free(size_t size, void *vaddr)
 	page = virt_to_page(vaddr);

 	do {
-		ClearPageReserved(page);
-		__free_page(page);
+		__free_reserved_page(page);
 		page++;
 	} while (size -= PAGE_SIZE);
 #else

@@ -194,9 +193,7 @@ void consistent_free(size_t size, void *vaddr)
 		pte_clear(&init_mm, (unsigned int)vaddr, ptep);
 		if (pfn_valid(pfn)) {
 			page = pfn_to_page(pfn);
-			ClearPageReserved(page);
-			__free_page(page);
+			__free_reserved_page(page);
 		}
 	}
 	vaddr += PAGE_SIZE;

arch/mips/include/uapi/asm/errno.h | 2

@@ -102,7 +102,7 @@
 #define EWOULDBLOCK EAGAIN /* Operation would block */
 #define EALREADY 149 /* Operation already in progress */
 #define EINPROGRESS 150 /* Operation now in progress */
-#define ESTALE 151 /* Stale NFS file handle */
+#define ESTALE 151 /* Stale file handle */
 #define ECANCELED 158 /* AIO operation canceled */

 /*

arch/parisc/include/uapi/asm/errno.h | 2

@@ -37,7 +37,7 @@
 #define EBADMSG 67 /* Not a data message */
 #define EUSERS 68 /* Too many users */
 #define EDQUOT 69 /* Quota exceeded */
-#define ESTALE 70 /* Stale NFS file handle */
+#define ESTALE 70 /* Stale file handle */
 #define EREMOTE 71 /* Object is remote */
 #define EOVERFLOW 72 /* Value too large for defined data type */

arch/parisc/kernel/module.c | 2

@@ -219,7 +219,7 @@ void *module_alloc(unsigned long size)
 	 * init_data correctly */
 	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
 				    GFP_KERNEL | __GFP_HIGHMEM,
-				    PAGE_KERNEL_RWX, -1,
+				    PAGE_KERNEL_RWX, NUMA_NO_NODE,
 				    __builtin_return_address(0));
 }

arch/powerpc/mm/dma-noncoherent.c | 4

@@ -287,9 +287,7 @@ void __dma_free_coherent(size_t size, void *vaddr)
 		pte_clear(&init_mm, addr, ptep);
 		if (pfn_valid(pfn)) {
 			struct page *page = pfn_to_page(pfn);
-			ClearPageReserved(page);
-			__free_page(page);
+			__free_reserved_page(page);
 		}
 	}
 	addr += PAGE_SIZE;

arch/powerpc/mm/hugetlbpage.c | 2

@@ -633,8 +633,6 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
 /*
  * This function frees user-level page tables of a process.
- *
- * Must be called with pagetable lock held.
  */
 void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 			    unsigned long addr, unsigned long end,

arch/powerpc/mm/numa.c | 3

@@ -938,8 +938,7 @@ static void __init mark_reserved_regions_for_nid(int nid)
 		unsigned long start_pfn = physbase >> PAGE_SHIFT;
 		unsigned long end_pfn = PFN_UP(physbase + size);
 		struct node_active_region node_ar;
-		unsigned long node_end_pfn = node->node_start_pfn +
-					     node->node_spanned_pages;
+		unsigned long node_end_pfn = pgdat_end_pfn(node);

 		/*
 		 * Check to make sure that this memblock.reserved area is
2
arch/s390/kernel/module.c

@ -50,7 +50,7 @@ void *module_alloc(unsigned long size)
if (PAGE_ALIGN(size) > MODULES_LEN)
return NULL;
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
GFP_KERNEL, PAGE_KERNEL, -1,
GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
__builtin_return_address(0));
}
#endif

arch/s390/mm/mmap.c | 9

@@ -64,6 +64,11 @@ static unsigned long mmap_rnd(void)
 	return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
 }

+static unsigned long mmap_base_legacy(void)
+{
+	return TASK_UNMAPPED_BASE + mmap_rnd();
+}
+
 static inline unsigned long mmap_base(void)
 {
 	unsigned long gap = rlimit(RLIMIT_STACK);

@@ -89,7 +94,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	 * bit is set, or if the expected stack growth is unlimited:
 	 */
 	if (mmap_is_legacy()) {
-		mm->mmap_base = TASK_UNMAPPED_BASE;
+		mm->mmap_base = mmap_base_legacy();
 		mm->get_unmapped_area = arch_get_unmapped_area;
 	} else {
 		mm->mmap_base = mmap_base();

@@ -164,7 +169,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	 * bit is set, or if the expected stack growth is unlimited:
 	 */
 	if (mmap_is_legacy()) {
-		mm->mmap_base = TASK_UNMAPPED_BASE;
+		mm->mmap_base = mmap_base_legacy();
 		mm->get_unmapped_area = s390_get_unmapped_area;
 	} else {
 		mm->mmap_base = mmap_base();

arch/sh/include/asm/fpu.h | 2

@@ -46,7 +46,7 @@ static inline void __unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)
 		save_fpu(tsk);
 		release_fpu(regs);
 	} else
-		tsk->fpu_counter = 0;
+		tsk->thread.fpu_counter = 0;
 }

 static inline void unlazy_fpu(struct task_struct *tsk, struct pt_regs *regs)

arch/sh/include/asm/processor_32.h | 10

@@ -111,6 +111,16 @@ struct thread_struct {

 	/* Extended processor state */
 	union thread_xstate *xstate;
+
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * that the FPU is used. If this is over a threshold, the lazy fpu
+	 * saving becomes unlazy to save the trap. This is an unsigned char
+	 * so that after 256 times the counter wraps and the behavior turns
+	 * lazy again; this to deal with bursty apps that only use FPU for
+	 * a short time
+	 */
+	unsigned char fpu_counter;
 };

 #define INIT_THREAD { \

arch/sh/include/asm/processor_64.h | 10

@@ -126,6 +126,16 @@ struct thread_struct {

 	/* floating point info */
 	union thread_xstate *xstate;
+
+	/*
+	 * fpu_counter contains the number of consecutive context switches
+	 * that the FPU is used. If this is over a threshold, the lazy fpu
+	 * saving becomes unlazy to save the trap. This is an unsigned char
+	 * so that after 256 times the counter wraps and the behavior turns
+	 * lazy again; this to deal with bursty apps that only use FPU for
+	 * a short time
+	 */
+	unsigned char fpu_counter;
 };

 #define INIT_MMAP \

arch/sh/kernel/cpu/fpu.c | 2

@@ -44,7 +44,7 @@ void __fpu_state_restore(void)
 	restore_fpu(tsk);
 	task_thread_info(tsk)->status |= TS_USEDFPU;
-	tsk->fpu_counter++;
+	tsk->thread.fpu_counter++;
 }

 void fpu_state_restore(struct pt_regs *regs)

arch/sh/kernel/process_32.c | 6

@@ -156,7 +156,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 #endif
 		ti->addr_limit = KERNEL_DS;
 		ti->status &= ~TS_USEDFPU;
-		p->fpu_counter = 0;
+		p->thread.fpu_counter = 0;
 		return 0;
 	}

 	*childregs = *current_pt_regs();

@@ -189,7 +189,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 	unlazy_fpu(prev, task_pt_regs(prev));

 	/* we're going to use this soon, after a few expensive things */
-	if (next->fpu_counter > 5)
+	if (next->thread.fpu_counter > 5)
 		prefetch(next_t->xstate);

 #ifdef CONFIG_MMU

@@ -207,7 +207,7 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 	 * restore of the math state immediately to avoid the trap; the
 	 * chances of needing FPU soon are obviously high now
 	 */
-	if (next->fpu_counter > 5)
+	if (next->thread.fpu_counter > 5)
 		__fpu_state_restore();

 	return prev;

arch/sh/kernel/process_64.c | 4

@@ -374,7 +374,7 @@ asmlinkage void ret_from_kernel_thread(void);
 int copy_thread(unsigned long clone_flags, unsigned long usp,
 		unsigned long arg, struct task_struct *p)
 {
-	struct pt_regs *childregs, *regs = current_pt_regs();
+	struct pt_regs *childregs;

 #ifdef CONFIG_SH_FPU
 	/* can't happen for a kernel thread */

@@ -393,7 +393,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	if (unlikely(p->flags & PF_KTHREAD)) {
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->regs[2] = (unsigned long)arg;
-		childregs->regs[3] = (unsigned long)fn;
+		childregs->regs[3] = (unsigned long)usp;

 		childregs->sr = (1 << 30); /* not user_mode */
 		childregs->sr |= SR_FD; /* Invalidate FPU flag */
 		p->thread.pc = (unsigned long) ret_from_kernel_thread;

arch/sh/mm/init.c | 2

@@ -231,7 +231,7 @@ static void __init bootmem_init_one_node(unsigned int nid)
 	if (!p->node_spanned_pages)
 		return;

-	end_pfn = p->node_start_pfn + p->node_spanned_pages;
+	end_pfn = pgdat_end_pfn(p);

 	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);

arch/sparc/include/uapi/asm/errno.h | 2

@@ -40,7 +40,7 @@
 #define EPROCLIM 67 /* SUNOS: Too many processes */
 #define EUSERS 68 /* Too many users */
 #define EDQUOT 69 /* Quota exceeded */
-#define ESTALE 70 /* Stale NFS file handle */
+#define ESTALE 70 /* Stale file handle */
 #define EREMOTE 71 /* Object is remote */
 #define ENOSTR 72 /* Device not a stream */
 #define ETIME 73 /* Timer expired */

arch/sparc/kernel/module.c | 2

@@ -29,7 +29,7 @@ static void *module_map(unsigned long size)
 	if (PAGE_ALIGN(size) > MODULES_LEN)
 		return NULL;
 	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				GFP_KERNEL, PAGE_KERNEL, -1,
+				GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE,
 				__builtin_return_address(0));
 }
 #else

arch/x86/include/asm/fpu-internal.h | 10

@@ -365,7 +365,7 @@ static inline void drop_fpu(struct task_struct *tsk)
 	 * Forget coprocessor state..
 	 */
 	preempt_disable();
-	tsk->fpu_counter = 0;
+	tsk->thread.fpu_counter = 0;
 	__drop_fpu(tsk);
 	clear_used_math();
 	preempt_enable();

@@ -424,7 +424,7 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 	 * or if the past 5 consecutive context-switches used math.
 	 */
 	fpu.preload = tsk_used_math(new) && (use_eager_fpu() ||
-					     new->fpu_counter > 5);
+					     new->thread.fpu_counter > 5);
 	if (__thread_has_fpu(old)) {
 		if (!__save_init_fpu(old))
 			cpu = ~0;

@@ -433,16 +433,16 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
 		/* Don't change CR0.TS if we just switch! */
 		if (fpu.preload) {
-			new->fpu_counter++;
+			new->thread.fpu_counter++;
 			__thread_set_has_fpu(new);
 			prefetch(new->thread.fpu.state);
 		} else if (!use_eager_fpu())
 			stts();
 	} else {
-		old->fpu_counter = 0;
+		old->thread.fpu_counter = 0;
 		old->thread.fpu.last_cpu = ~0;
 		if (fpu.preload) {
-			new->fpu_counter++;
+			new->thread.fpu_counter++;
 			if (!use_eager_fpu() && fpu_lazy_restore(new, cpu))
 				fpu.preload = 0;
 			else

9
arch/x86/include/asm/processor.h

@ -488,6 +488,15 @@ struct thread_struct {
unsigned long iopl;
/* Max allowed port in the bitmap, in bytes: */
unsigned io_bitmap_max;
/*
* fpu_counter contains the number of consecutive context switches
* that the FPU is used. If this is over a threshold, the lazy fpu
* saving becomes unlazy to save the trap. This is an unsigned char
* so that after 256 times the counter wraps and the behavior turns
* lazy again; this to deal with bursty apps that only use FPU for
* a short time
*/
unsigned char fpu_counter;
};
/*

arch/x86/kernel/i387.c | 2

@@ -100,7 +100,7 @@ void unlazy_fpu(struct task_struct *tsk)
 		__save_init_fpu(tsk);
 		__thread_fpu_end(tsk);
 	} else
-		tsk->fpu_counter = 0;
+		tsk->thread.fpu_counter = 0;
 	preempt_enable();
 }
 EXPORT_SYMBOL(unlazy_fpu);

arch/x86/kernel/module.c | 2

@@ -49,7 +49,7 @@ void *module_alloc(unsigned long size)
 		return NULL;
 	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
 				GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
-				-1, __builtin_return_address(0));
+				NUMA_NO_NODE, __builtin_return_address(0));
 }

 #ifdef CONFIG_X86_32

arch/x86/kernel/process_32.c | 4

@@ -153,7 +153,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 		childregs->orig_ax = -1;
 		childregs->cs = __KERNEL_CS | get_kernel_rpl();
 		childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_FIXED;
-		p->fpu_counter = 0;
+		p->thread.fpu_counter = 0;
 		p->thread.io_bitmap_ptr = NULL;
 		memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
 		return 0;

@@ -166,7 +166,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	p->thread.ip = (unsigned long) ret_from_fork;
 	task_user_gs(p) = get_user_gs(current_pt_regs());

-	p->fpu_counter = 0;
+	p->thread.fpu_counter = 0;
 	p->thread.io_bitmap_ptr = NULL;
 	tsk = current;
 	err = -ENOMEM;

arch/x86/kernel/process_64.c | 2

@@ -163,7 +163,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 	p->thread.sp = (unsigned long) childregs;
 	p->thread.usersp = me->thread.usersp;
 	set_tsk_thread_flag(p, TIF_FORK);
-	p->fpu_counter = 0;
+	p->thread.fpu_counter = 0;
 	p->thread.io_bitmap_ptr = NULL;

 	savesegment(gs, p->thread.gsindex);

arch/x86/kernel/setup.c | 9

@@ -1121,8 +1121,6 @@ void __init setup_arch(char **cmdline_p)
 	acpi_initrd_override((void *)initrd_start, initrd_end - initrd_start);
 #endif

-	reserve_crashkernel();
-
 	vsmp_init();

 	io_delay_init();

@@ -1135,6 +1133,13 @@ void __init setup_arch(char **cmdline_p)
 	early_acpi_boot_init();

 	initmem_init();

+	/*
+	 * Reserve memory for crash kernel after SRAT is parsed so that it
+	 * won't consume hotpluggable memory.
+	 */
+	reserve_crashkernel();
+
 	memblock_find_dma_reserve();

 #ifdef CONFIG_KVM_GUEST

arch/x86/kernel/traps.c | 2

@@ -653,7 +653,7 @@ void math_state_restore(void)
 		return;
 	}

-	tsk->fpu_counter++;
+	tsk->thread.fpu_counter++;
 }
 EXPORT_SYMBOL_GPL(math_state_restore);

arch/x86/mm/init.c | 125

@@ -53,12 +53,12 @@ __ref void *alloc_low_pages(unsigned int num)
 	if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
 		unsigned long ret;
 		if (min_pfn_mapped >= max_pfn_mapped)
-			panic("alloc_low_page: ran out of memory");
+			panic("alloc_low_pages: ran out of memory");
 		ret = memblock_find_in_range(min_pfn_mapped << PAGE_SHIFT,
 					max_pfn_mapped << PAGE_SHIFT,
 					PAGE_SIZE * num , PAGE_SIZE);
 		if (!ret)
-			panic("alloc_low_page: can not alloc memory");
+			panic("alloc_low_pages: can not alloc memory");
 		memblock_reserve(ret, PAGE_SIZE * num);
 		pfn = ret >> PAGE_SHIFT;
 	} else {

@@ -418,27 +418,27 @@ static unsigned long __init get_new_step_size(unsigned long step_size)
 	return step_size << 5;
 }

-void __init init_mem_mapping(void)
+/**
+ * memory_map_top_down - Map [map_start, map_end) top down
+ * @map_start: start address of the target memory range
+ * @map_end: end address of the target memory range
+ *
+ * This function will setup direct mapping for memory range
+ * [map_start, map_end) in top-down. That said, the page tables
+ * will be allocated at the end of the memory, and we map the
+ * memory in top-down.
+ */
+static void __init memory_map_top_down(unsigned long map_start,
+				       unsigned long map_end)
 {
-	unsigned long end, real_end, start, last_start;
+	unsigned long real_end, start, last_start;
 	unsigned long step_size;
 	unsigned long addr;
 	unsigned long mapped_ram_size = 0;
 	unsigned long new_mapped_ram_size;

-	probe_page_size_mask();
-
-#ifdef CONFIG_X86_64
-	end = max_pfn << PAGE_SHIFT;
-#else
-	end = max_low_pfn << PAGE_SHIFT;
-#endif
-
-	/* the ISA range is always mapped regardless of memory holes */
-	init_memory_mapping(0, ISA_END_ADDRESS);
-
 	/* xen has big range in reserved near end of ram, skip it at first.*/
-	addr = memblock_find_in_range(ISA_END_ADDRESS, end, PMD_SIZE, PMD_SIZE);
+	addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
 	real_end = addr + PMD_SIZE;

 	/* step_size need to be small so pgt_buf from BRK could cover it */

@@ -453,13 +453,13 @@ void __init init_mem_mapping(void)
 	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
 	 * for page table.
 	 */
-	while (last_start > ISA_END_ADDRESS) {
+	while (last_start > map_start) {
 		if (last_start > step_size) {
 			start = round_down(last_start - 1, step_size);
-			if (start < ISA_END_ADDRESS)
-				start = ISA_END_ADDRESS;
+			if (start < map_start)
+				start = map_start;
 		} else
-			start = ISA_END_ADDRESS;
+			start = map_start;
 		new_mapped_ram_size = init_range_memory_mapping(start,
 							last_start);
 		last_start = start;

@@ -470,8 +470,89 @@ void __init init_mem_mapping(void)
 		mapped_ram_size += new_mapped_ram_size;
 	}

-	if (real_end < end)
-		init_range_memory_mapping(real_end, end);
+	if (real_end < map_end)
+		init_range_memory_mapping(real_end, map_end);
+}
+
+/**
+ * memory_map_bottom_up - Map [map_start, map_end) bottom up
+ * @map_start: start address of the target memory range
+ * @map_end: end address of the target memory range
+ *
+ * This function will setup direct mapping for memory range
+ * [map_start, map_end) in bottom-up. Since we have limited the
+ * bottom-up allocation above the kernel, the page tables will
+ * be allocated just above the kernel and we map the memory
+ * in [map_start, map_end) in bottom-up.
+ */
+static void __init memory_map_bottom_up(unsigned long map_start,
+					unsigned long map_end)
+{
+	unsigned long next, new_mapped_ram_size, start;
+	unsigned long mapped_ram_size = 0;
+	/* step_size need to be small so pgt_buf from BRK could cover it */
+	unsigned long step_size = PMD_SIZE;
+
+	start = map_start;
+	min_pfn_mapped = start >> PAGE_SHIFT;
+
+	/*
+	 * We start from the bottom (@map_start) and go to the top (@map_end).
+	 * The memblock_find_in_range() gets us a block of RAM from the
+	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
+	 * for page table.
+	 */
+	while (start < map_end) {
+		if (map_end - start > step_size) {
+			next = round_up(start + 1, step_size);
+			if (next > map_end)
+				next = map_end;
+		} else
+			next = map_end;
+
+		new_mapped_ram_size = init_range_memory_mapping(start, next);
+		start = next;
+
+		if (new_mapped_ram_size > mapped_ram_size)
+			step_size = get_new_step_size(step_size);
+		mapped_ram_size += new_mapped_ram_size;
+	}
+}
+
+void __init init_mem_mapping(void)
+{
+	unsigned long end;
+
+	probe_page_size_mask();
+
+#ifdef CONFIG_X86_64
+	end = max_pfn << PAGE_SHIFT;
+#else
+	end = max_low_pfn << PAGE_SHIFT;
+#endif
+
+	/* the ISA range is always mapped regardless of memory holes */
+	init_memory_mapping(0, ISA_END_ADDRESS);
+
+	/*
+	 * If the allocation is in bottom-up direction, we setup direct mapping
+	 * in bottom-up, otherwise we setup direct mapping in top-down.
+	 */
+	if (memblock_bottom_up()) {
+		unsigned long kernel_end = __pa_symbol(_end);
+
+		/*
+		 * we need two separate calls here. This is because we want to
+		 * allocate page tables above the kernel. So we first map
+		 * [kernel_end, end) to make memory above the kernel be mapped
+		 * as soon as possible. And then use page tables allocated above
+		 * the kernel to map [ISA_END_ADDRESS, kernel_end).
+		 */
+		memory_map_bottom_up(kernel_end, end);
+		memory_map_bottom_up(ISA_END_ADDRESS, kernel_end);
+	} else {
+		memory_map_top_down(ISA_END_ADDRESS, end);
+	}

 #ifdef CONFIG_X86_64
 	if (max_pfn > max_low_pfn) {

arch/x86/mm/numa.c | 11

@@ -567,6 +567,17 @@ static int __init numa_init(int (*init_func)(void))
 	ret = init_func();
 	if (ret < 0)
 		return ret;
+
+	/*
+	 * We reset memblock back to the top-down direction
+	 * here because if we configured ACPI_NUMA, we have
+	 * parsed SRAT in init_func(). It is ok to have the
+	 * reset here even if we did't configure ACPI_NUMA
+	 * or acpi numa init fails and fallbacks to dummy
+	 * numa init.
+	 */
+	memblock_set_bottom_up(false);
+
 	ret = numa_cleanup_meminfo(&numa_meminfo);
 	if (ret < 0)
 		return ret;

10
drivers/char/Kconfig

@ -522,10 +522,16 @@ config HPET_MMAP
If you say Y here, user applications will be able to mmap
the HPET registers.
config HPET_MMAP_DEFAULT
bool "Enable HPET MMAP access by default"
default y
depends on HPET_MMAP
help
In some hardware implementations, the page containing HPET
registers may also contain other things that shouldn't be
exposed to the user. If this applies to your hardware,
say N here.
exposed to the user. This option selects the default (if
kernel parameter hpet_mmap is not set) user access to the
registers for applications that require it.
config HANGCHECK_TIMER
tristate "Hangcheck timer"

24
drivers/char/hpet.c

@ -367,12 +367,29 @@ static unsigned int hpet_poll(struct file *file, poll_table * wait)
return 0;
}
#ifdef CONFIG_HPET_MMAP
#ifdef CONFIG_HPET_MMAP_DEFAULT
static int hpet_mmap_enabled = 1;
#else
static int hpet_mmap_enabled = 0;
#endif
static __init int hpet_mmap_enable(char *str)
{
get_option(&str, &hpet_mmap_enabled);
pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled");
return 1;
}
__setup("hpet_mmap", hpet_mmap_enable);
static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_HPET_MMAP
struct hpet_dev *devp;
unsigned long addr;
if (!hpet_mmap_enabled)
return -EACCES;
devp = file->private_data;
addr = devp->hd_hpets->hp_hpet_phys;
@ -381,10 +398,13 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
return vm_iomap_memory(vma, addr, PAGE_SIZE);
}
#else
static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
{
return -ENOSYS;
#endif
}
#endif
static int hpet_fasync(int fd, struct file *file, int on)
{

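With the new default and the hpet_mmap= boot switch in place, a user-space mapping attempt looks like the sketch below (illustrative only, not part of the patch; it merely observes the -EACCES the driver now returns when mapping is disabled):

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/hpet", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/hpet");
		return 1;
	}

	/* Fails with EACCES when hpet_mmap_enabled is 0. */
	void *regs = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
	if (regs == MAP_FAILED)
		perror("mmap HPET registers");
	else
		munmap(regs, 4096);

	close(fd);
	return 0;
}
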
drivers/dma/mmp_tdma.c | 7

@@ -350,12 +350,7 @@ struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac)
 	if (!gpool)
 		return NULL;

-	tdmac->desc_arr = (void *)gen_pool_alloc(gpool, size);
-	if (!tdmac->desc_arr)
-		return NULL;
-
-	tdmac->desc_arr_phys = gen_pool_virt_to_phys(gpool,
-			(unsigned long)tdmac->desc_arr);
+	tdmac->desc_arr = gen_pool_dma_alloc(gpool, size, &tdmac->desc_arr_phys);

 	return tdmac->desc_arr;
 }

2
drivers/iommu/omap-iopgtable.h

@ -95,4 +95,4 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
#define iopte_offset(iopgd, da) (iopgd_page_vaddr(iopgd) + iopte_index(da))
#define to_iommu(dev) \
((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
(platform_get_drvdata(to_platform_device(dev)))

drivers/media/platform/coda.c | 5

@@ -3232,13 +3232,12 @@ static int coda_probe(struct platform_device *pdev)
 		dev->iram_size = CODA7_IRAM_SIZE;
 		break;
 	}
-	dev->iram_vaddr = gen_pool_alloc(dev->iram_pool, dev->iram_size);
+	dev->iram_vaddr = (unsigned long)gen_pool_dma_alloc(dev->iram_pool,
+			dev->iram_size, (dma_addr_t *)&dev->iram_paddr);
 	if (!dev->iram_vaddr) {
 		dev_err(&pdev->dev, "unable to alloc iram\n");
 		return -ENOMEM;
 	}
-	dev->iram_paddr = gen_pool_virt_to_phys(dev->iram_pool,
-						dev->iram_vaddr);

 	platform_set_drvdata(pdev, dev);

drivers/memstick/core/ms_block.c | 2

@@ -401,7 +401,7 @@ again:
 			sizeof(struct ms_status_register)))
 		return 0;

-	msb->state = MSB_RP_RECEIVE_OOB_READ;
+	msb->state = MSB_RP_RECIVE_STATUS_REG;
 	return 0;

 case MSB_RP_RECIVE_STATUS_REG:

drivers/memstick/core/mspro_block.c | 4

@@ -1023,8 +1023,8 @@ static int mspro_block_read_attributes(struct memstick_dev *card)
 	} else
 		attr_count = attr->count;

-	msb->attr_group.attrs = kzalloc((attr_count + 1)
-					* sizeof(struct attribute),
+	msb->attr_group.attrs = kcalloc(attr_count + 1,
+					sizeof(*msb->attr_group.attrs),
 					GFP_KERNEL);
 	if (!msb->attr_group.attrs) {
 		rc = -ENOMEM;

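Besides sizing the array by the element type actually stored (a pointer), the kzalloc-to-kcalloc switch matters because kcalloc() checks the n * size multiplication for overflow before allocating; conceptually (simplified from the real include/linux/slab.h helpers):

static inline void *kcalloc_sketch(size_t n, size_t size, gfp_t flags)
{
	/* refuse allocations whose byte count would wrap around */
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;
	return kzalloc(n * size, flags);
}
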
drivers/message/i2o/driver.c | 16

@@ -105,7 +105,8 @@ int i2o_driver_register(struct i2o_driver *drv)
 		osm_err("too many drivers registered, increase "
 			"max_drivers\n");
 		spin_unlock_irqrestore(&i2o_drivers_lock, flags);
-		return -EFAULT;
+		rc = -EFAULT;
+		goto out;
 	}

 	drv->context = i;

@@ -124,11 +125,14 @@ int i2o_driver_register(struct i2o_driver *drv)
 	}

 	rc = driver_register(&drv->driver);
-	if (rc) {
-		if (drv->event) {
-			destroy_workqueue(drv->event_queue);
-			drv->event_queue = NULL;
-		}
-	}
+	if (rc)
+		goto out;

+	return 0;
+out:
+	if (drv->event_queue) {
+		destroy_workqueue(drv->event_queue);
+		drv->event_queue = NULL;
+	}

 	return rc;

drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 3

@@ -1217,9 +1217,6 @@ static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
 					ETH_VLAN_FILTER_CLASSIFY, config);
 }

-#define list_next_entry(pos, member) \
-	list_entry((pos)->member.next, typeof(*(pos)), member)
-
 /**
  * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
 *