author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2012-12-05 14:24:36 -0800
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2012-12-05 14:24:36 -0800
commit     8f2828f9846021088223c43284d12b8ba0eac0d0 (patch)
tree       2f216b6a4ed670300ac2691f2e739f2b9a15ebb0
parent     e27c040225e4fcf3cdc498b08fba44b61ec6db47 (diff)
download   ltsi-kernel-8f2828f9846021088223c43284d12b8ba0eac0d0.tar.gz
add dma-mapping patches
-rw-r--r--  patches.at91/0006-ARM-at91-change-AT91-Kconfig-entry-comment.patch  9
-rw-r--r--  patches.at91/0213-ARM-at91-add-pinctrl-support.patch  41
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-add-function-for-setting-coherent-pool-size-from-platform-code.patch  92
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-add-missing-static-storage-class-specifier.patch  52
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-add-more-sanity-checks-in-arm_dma_mmap.patch  56
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-add-support-for-iommu-mapper.patch  902
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-atomic_pool-with-struct-page-pages.patch  90
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-fix-atomic-allocation-alignment.patch  54
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-fix-buffer-chunk-allocation-order.patch  49
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-fix-debug-messages-in-dmabounce-code.patch  81
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-fix-error-path-for-memory-allocation-failure.patch  40
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-fix-incorrect-freeing-of-atomic-allocations.patch  81
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-fix-potential-memory-leak-in-atomic_pool_init.patch  37
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-implement-dma-sg-methods-on-top-of-any-generic-dma-ops.patch  153
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-introduce-dma_error_code-constant.patch  96
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-modify-condition-check-while-freeing-pages.patch  72
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-move-all-dma-bounce-code-to-separate-dma-ops-structure.patch  425
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-print-warning-when-atomic-coherent-allocation-fails.patch  41
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-refactor-out-to-introduce-__in_atomic_pool.patch  70
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-remove-custom-consistent-dma-region.patch  745
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-remove-offset-parameter-to-prepare-for-generic-dma_ops.patch  244
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-remove-redundant-code-and-do-the-cleanup.patch  179
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-remove-unconditional-dependency-on-cma.patch  88
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-use-alloc-mmap-free-from-dma_ops.patch  340
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-use-asm-generic-dma-mapping-common.h.patch  534
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-use-dma_mmap_from_coherent.patch  41
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-use-pmd-size-for-section-unmap.patch  49
-rw-r--r--  patches.dma-mapping/arm-dma-mapping-use-pr_-instread-of-printk.patch  95
-rw-r--r--  patches.dma-mapping/arm-fix-warning-caused-by-wrongly-typed-arm_dma_limit.patch  37
-rw-r--r--  patches.dma-mapping/arm-integrate-cma-with-dma-mapping-subsystem.patch  793
-rw-r--r--  patches.dma-mapping/arm-mm-fix-dma-pool-affiliation-check.patch  68
-rw-r--r--  patches.dma-mapping/arm-mm-fix-mmu-mapping-of-cma-regions.patch  45
-rw-r--r--  patches.dma-mapping/arm-mm-fix-type-of-the-arm_dma_limit-global-variable.patch  50
-rw-r--r--  patches.dma-mapping/arm-relax-conditions-required-for-enabling-contiguous-memory-allocator.patch  44
-rw-r--r--  patches.dma-mapping/cma-fix-migration-mode.patch  42
-rw-r--r--  patches.dma-mapping/common-add-dma_mmap_from_coherent-function.patch  115
-rw-r--r--  patches.dma-mapping/common-dma-mapping-add-support-for-generic-dma_mmap_-calls.patch  306
-rw-r--r--  patches.dma-mapping/driver-core-fix-some-kernel-doc-warnings-in-dma-.c.patch  47
-rw-r--r--  patches.dma-mapping/drivers-add-contiguous-memory-allocator.patch  745
-rw-r--r--  patches.dma-mapping/iommu-core-pass-a-user-provided-token-to-fault-handlers.patch  143
-rw-r--r--  patches.dma-mapping/mm-clean-up-__count_immobile_pages.patch  112
-rw-r--r--  patches.dma-mapping/mm-cma-don-t-replace-lowmem-pages-with-highmem.patch  70
-rw-r--r--  patches.dma-mapping/mm-cma-fix-alignment-requirements-for-contiguous-regions.patch  44
-rw-r--r--  patches.dma-mapping/mm-cma-fix-condition-check-when-setting-global-cma-area.patch  39
-rw-r--r--  patches.dma-mapping/mm-compaction-export-some-of-the-functions.patch  511
-rw-r--r--  patches.dma-mapping/mm-compaction-introduce-isolate_freepages_range.patch  205
-rw-r--r--  patches.dma-mapping/mm-compaction-introduce-isolate_migratepages_range.patch  152
-rw-r--r--  patches.dma-mapping/mm-compaction-introduce-map_pages.patch  65
-rw-r--r--  patches.dma-mapping/mm-extract-reclaim-code-from-__alloc_pages_direct_reclaim.patch  92
-rw-r--r--  patches.dma-mapping/mm-factor-out-memory-isolate-functions.patch  351
-rw-r--r--  patches.dma-mapping/mm-mmzone-migrate_cma-migration-type-added.patch  327
-rw-r--r--  patches.dma-mapping/mm-page_alloc-change-fallbacks-array-handling.patch  71
-rw-r--r--  patches.dma-mapping/mm-page_alloc-introduce-alloc_contig_range.patch  261
-rw-r--r--  patches.dma-mapping/mm-page_alloc-remove-trailing-whitespace.patch  73
-rw-r--r--  patches.dma-mapping/mm-page_isolation-migrate_cma-isolation-functions-added.patch  256
-rw-r--r--  patches.dma-mapping/mm-serialize-access-to-min_free_kbytes.patch  73
-rw-r--r--  patches.dma-mapping/mm-trigger-page-reclaim-in-alloc_contig_range-to-stabilise-watermarks.patch  151
-rw-r--r--  patches.dma-mapping/mm-vmalloc-use-const-void-for-caller-argument.patch  145
-rw-r--r--  patches.dma-mapping/x86-dma-mapping-fix-broken-allocation-when-dma_mask-has-been-provided.patch  50
-rw-r--r--  patches.dma-mapping/x86-integrate-cma-with-dma-mapping-subsystem.patch  162
-rw-r--r--  patches.kzm9d/arm-mach-shmobile-fix-build-when-smp-is-enabled-and-emev2-is-not-enabled.patch  5
-rw-r--r--  patches.marzen/0016-devicetree-add-helper-inline-for-retrieving-a-node-s.patch  75
-rw-r--r--  patches.marzen/0037-ARM-provide-a-late_initcall-hook-for-platform-initia.patch  13
-rw-r--r--  series  67
64 files changed, 10459 insertions, 102 deletions
diff --git a/patches.at91/0006-ARM-at91-change-AT91-Kconfig-entry-comment.patch b/patches.at91/0006-ARM-at91-change-AT91-Kconfig-entry-comment.patch
index c27d6d203c63b..af7e2fd5394da 100644
--- a/patches.at91/0006-ARM-at91-change-AT91-Kconfig-entry-comment.patch
+++ b/patches.at91/0006-ARM-at91-change-AT91-Kconfig-entry-comment.patch
@@ -7,14 +7,12 @@ commit 929e994f7e249a58f50ccdd066da9899a6e39c7a upstream.
Signed-off-by: Nicolas Ferre <nicolas.ferre@atmel.com>
---
- arch/arm/Kconfig | 4 ++--
+ arch/arm/Kconfig | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
-diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
-index 7a8660a..ddef021 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -340,8 +340,8 @@ config ARCH_AT91
+@@ -350,8 +350,8 @@ config ARCH_AT91
select IRQ_DOMAIN
select NEED_MACH_IO_H if PCCARD
help
@@ -25,6 +23,3 @@ index 7a8660a..ddef021 100644
config ARCH_BCMRING
bool "Broadcom BCMRING"
---
-1.8.0.197.g5a90748
-
diff --git a/patches.at91/0213-ARM-at91-add-pinctrl-support.patch b/patches.at91/0213-ARM-at91-add-pinctrl-support.patch
index 381942cfb7fe5..5a1c8f4efee7a 100644
--- a/patches.at91/0213-ARM-at91-add-pinctrl-support.patch
+++ b/patches.at91/0213-ARM-at91-add-pinctrl-support.patch
@@ -15,20 +15,17 @@ Signed-off-by: Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
Conflicts:
arch/arm/mach-at91/board-dt.c
---
- .../bindings/pinctrl/atmel,at91-pinctrl.txt | 84 ++
- arch/arm/Kconfig | 2 +
- arch/arm/mach-at91/board-dt.c | 2 -
- arch/arm/mach-at91/gpio.c | 165 +--
- drivers/pinctrl/Kconfig | 9 +
- drivers/pinctrl/Makefile | 1 +
- drivers/pinctrl/pinctrl-at91.c | 1490 ++++++++++++++++++++
+ Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt | 84
+ arch/arm/Kconfig | 2
+ arch/arm/mach-at91/board-dt.c | 2
+ arch/arm/mach-at91/gpio.c | 165 -
+ drivers/pinctrl/Kconfig | 9
+ drivers/pinctrl/Makefile | 1
+ drivers/pinctrl/pinctrl-at91.c | 1490 ++++++++++
7 files changed, 1591 insertions(+), 162 deletions(-)
create mode 100644 Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
create mode 100644 drivers/pinctrl/pinctrl-at91.c
-diff --git a/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
-new file mode 100644
-index 0000000..0296ef4
--- /dev/null
+++ b/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt
@@ -0,0 +1,84 @@
@@ -116,11 +113,9 @@ index 0000000..0296ef4
+ pinctrl-0 = <&pinctrl_dbgu>;
+ status = "disabled";
+};
-diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
-index ddef021..ea72f88 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -339,6 +339,8 @@ config ARCH_AT91
+@@ -349,6 +349,8 @@ config ARCH_AT91
select CLKDEV_LOOKUP
select IRQ_DOMAIN
select NEED_MACH_IO_H if PCCARD
@@ -129,11 +124,9 @@ index ddef021..ea72f88 100644
help
This enables support for systems based on Atmel
AT91RM9200 and AT91SAM9* processors.
-diff --git a/arch/arm/mach-at91/board-dt.c b/arch/arm/mach-at91/board-dt.c
-index c0d242c..b1a5d3c 100644
--- a/arch/arm/mach-at91/board-dt.c
+++ b/arch/arm/mach-at91/board-dt.c
-@@ -127,8 +127,6 @@ struct of_dev_auxdata at91_auxdata_lookup[] __initdata = {
+@@ -127,8 +127,6 @@ struct of_dev_auxdata at91_auxdata_looku
static const struct of_device_id irq_of_match[] __initconst = {
{ .compatible = "atmel,at91rm9200-aic", .data = at91_aic_of_init },
@@ -142,8 +135,6 @@ index c0d242c..b1a5d3c 100644
{ /*sentinel*/ }
};
-diff --git a/arch/arm/mach-at91/gpio.c b/arch/arm/mach-at91/gpio.c
-index a34f0ed..c5d7e1e 100644
--- a/arch/arm/mach-at91/gpio.c
+++ b/arch/arm/mach-at91/gpio.c
@@ -23,8 +23,6 @@
@@ -155,7 +146,7 @@ index a34f0ed..c5d7e1e 100644
#include <asm/mach/irq.h>
-@@ -717,80 +715,6 @@ postcore_initcall(at91_gpio_debugfs_init);
+@@ -717,80 +715,6 @@ postcore_initcall(at91_gpio_debugfs_init
*/
static struct lock_class_key gpio_lock_class;
@@ -322,7 +313,7 @@ index a34f0ed..c5d7e1e 100644
static void __init at91_gpio_init_one(int idx, u32 regbase, int pioc_hwirq)
{
struct at91_gpio_chip *at91_gpio = &gpio_chip[idx];
-@@ -1102,11 +947,11 @@ void __init at91_gpio_init(struct at91_gpio_bank *data, int nr_banks)
+@@ -1102,11 +947,11 @@ void __init at91_gpio_init(struct at91_g
BUG_ON(nr_banks > MAX_GPIO_BANKS);
@@ -339,8 +330,6 @@ index a34f0ed..c5d7e1e 100644
for (i = 0; i < gpio_banks; i++) {
at91_gpio = &gpio_chip[i];
-diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
-index f73a5ea..6a03072 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -26,6 +26,15 @@ config DEBUG_PINCTRL
@@ -359,8 +348,6 @@ index f73a5ea..6a03072 100644
config PINCTRL_PXA3xx
bool
select PINMUX
-diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
-index 8e3c95a..84f4670 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -9,6 +9,7 @@ ifeq ($(CONFIG_OF),y)
@@ -371,9 +358,6 @@ index 8e3c95a..84f4670 100644
obj-$(CONFIG_PINCTRL_PXA3xx) += pinctrl-pxa3xx.o
obj-$(CONFIG_PINCTRL_MMP2) += pinctrl-mmp2.o
obj-$(CONFIG_PINCTRL_PXA168) += pinctrl-pxa168.o
-diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
-new file mode 100644
-index 0000000..e4712d1
--- /dev/null
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -0,0 +1,1490 @@
@@ -1867,6 +1851,3 @@ index 0000000..e4712d1
+MODULE_AUTHOR("Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>");
+MODULE_DESCRIPTION("Atmel AT91 pinctrl driver");
+MODULE_LICENSE("GPL v2");
---
-1.8.0.197.g5a90748
-
diff --git a/patches.dma-mapping/arm-dma-mapping-add-function-for-setting-coherent-pool-size-from-platform-code.patch b/patches.dma-mapping/arm-dma-mapping-add-function-for-setting-coherent-pool-size-from-platform-code.patch
new file mode 100644
index 0000000000000..7c5a240c58259
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-add-function-for-setting-coherent-pool-size-from-platform-code.patch
@@ -0,0 +1,92 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:54:00 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:51:10 +0900
+Subject: [PATCH v2 53/58] ARM: DMA-Mapping: add function for setting coherent pool size from platform code
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-54-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+Some platforms might require to increase atomic coherent pool to make
+sure that their device will be able to allocate all their buffers from
+atomic context. This function can be also used to decrease atomic
+coherent pool size if coherent allocations are not used for the given
+sub-platform.
+
+Suggested-by: Josh Coombs <josh.coombs@gmail.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit 6e5267aa543817015edb4a65c66e15f9809f92bd)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+---
+ arch/arm/include/asm/dma-mapping.h | 7 +++++++
+ arch/arm/mm/dma-mapping.c | 19 ++++++++++++++++++-
+ 2 files changed, 25 insertions(+), 1 deletions(-)
+
+diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
+index a048033..b5745a8 100644
+--- a/arch/arm/include/asm/dma-mapping.h
++++ b/arch/arm/include/asm/dma-mapping.h
+@@ -203,6 +203,13 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
+ }
+
+ /*
++ * This can be called during early boot to increase the size of the atomic
++ * coherent DMA pool above the default value of 256KiB. It must be called
++ * before postcore_initcall.
++ */
++extern void __init init_dma_coherent_pool_size(unsigned long size);
++
++/*
+ * This can be called during boot to increase the size of the consistent
+ * DMA region above it's default value of 2MB. It must be called before the
+ * memory allocator is initialised, i.e. before any core_initcall.
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 258da10..3ff2585 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -266,6 +266,8 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
+ vunmap(cpu_addr);
+ }
+
++#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
++
+ struct dma_pool {
+ size_t size;
+ spinlock_t lock;
+@@ -276,7 +278,7 @@ struct dma_pool {
+ };
+
+ static struct dma_pool atomic_pool = {
+- .size = SZ_256K,
++ .size = DEFAULT_DMA_COHERENT_POOL_SIZE,
+ };
+
+ static int __init early_coherent_pool(char *p)
+@@ -286,6 +288,21 @@ static int __init early_coherent_pool(char *p)
+ }
+ early_param("coherent_pool", early_coherent_pool);
+
++void __init init_dma_coherent_pool_size(unsigned long size)
++{
++ /*
++ * Catch any attempt to set the pool size too late.
++ */
++ BUG_ON(atomic_pool.vaddr);
++
++ /*
++ * Set architecture specific coherent pool size only if
++ * it has not been changed by kernel command line parameter.
++ */
++ if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
++ atomic_pool.size = size;
++}
++
+ /*
+ * Initialise the coherent pool for atomic allocations.
+ */
+--
+1.7.5.4
+
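The init_dma_coherent_pool_size() helper added by the patch above is meant to be called from platform code during early boot, before postcore_initcall. A minimal usage sketch follows (the board hook name is an illustrative assumption, not part of this series):

    #include <linux/init.h>
    #include <asm/sizes.h>
    #include <asm/dma-mapping.h>

    /* Hypothetical early-init hook of a machine descriptor. */
    static void __init example_board_init_early(void)
    {
            /* Grow the atomic coherent pool from the default 256 KiB to 1 MiB. */
            init_dma_coherent_pool_size(SZ_1M);
    }

Wiring such a hook into the machine descriptor's .init_early callback runs it well before any initcall, which satisfies the BUG_ON(atomic_pool.vaddr) check added above.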
diff --git a/patches.dma-mapping/arm-dma-mapping-add-missing-static-storage-class-specifier.patch b/patches.dma-mapping/arm-dma-mapping-add-missing-static-storage-class-specifier.patch
new file mode 100644
index 0000000000000..99517e928e6e6
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-add-missing-static-storage-class-specifier.patch
@@ -0,0 +1,52 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:04 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:48 +0900
+Subject: [PATCH v2 31/58] ARM: dma-mapping: Add missing static storage class specifier
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-32-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Sachin Kamat <sachin.kamat@linaro.org>
+
+Fixes the following sparse warnings:
+arch/arm/mm/dma-mapping.c:231:15: warning: symbol 'consistent_base' was not
+declared. Should it be static?
+arch/arm/mm/dma-mapping.c:326:8: warning: symbol 'coherent_pool_size' was not
+declared. Should it be static?
+
+Signed-off-by: Sachin Kamat <sachin.kamat@linaro.org>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit e53f517ff236a0ec5413ff3935c53406b69bc1e2)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ arch/arm/mm/dma-mapping.c | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 106c4c0..d766e42 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -228,7 +228,7 @@ static pte_t **consistent_pte;
+
+ #define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M
+
+-unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
++static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
+
+ void __init init_consistent_dma_size(unsigned long size)
+ {
+@@ -321,7 +321,7 @@ static struct arm_vmregion_head coherent_head = {
+ .vm_list = LIST_HEAD_INIT(coherent_head.vm_list),
+ };
+
+-size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
++static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+
+ static int __init early_coherent_pool(char *p)
+ {
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/arm-dma-mapping-add-more-sanity-checks-in-arm_dma_mmap.patch b/patches.dma-mapping/arm-dma-mapping-add-more-sanity-checks-in-arm_dma_mmap.patch
new file mode 100644
index 0000000000000..eeead73998fe5
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-add-more-sanity-checks-in-arm_dma_mmap.patch
@@ -0,0 +1,56 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:33 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:59 +0900
+Subject: [PATCH v2 42/58] ARM: dma-mapping: add more sanity checks in arm_dma_mmap()
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-43-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+Add some sanity checks and forbid mmaping of buffers into vma areas larger
+than allocated dma buffer.
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit 50262a4bf38dd70486e9fce2b8235d5ae3e0f627)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+---
+ arch/arm/mm/dma-mapping.c | 14 ++++++++++----
+ 1 files changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index d1dff4a3..6a94d17 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -611,16 +611,22 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ {
+ int ret = -ENXIO;
+ #ifdef CONFIG_MMU
++ unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++ unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long pfn = dma_to_pfn(dev, dma_addr);
++ unsigned long off = vma->vm_pgoff;
++
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+
+ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
+- ret = remap_pfn_range(vma, vma->vm_start,
+- pfn + vma->vm_pgoff,
+- vma->vm_end - vma->vm_start,
+- vma->vm_page_prot);
++ if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
++ ret = remap_pfn_range(vma, vma->vm_start,
++ pfn + off,
++ vma->vm_end - vma->vm_start,
++ vma->vm_page_prot);
++ }
+ #endif /* CONFIG_MMU */
+
+ return ret;
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/arm-dma-mapping-add-support-for-iommu-mapper.patch b/patches.dma-mapping/arm-dma-mapping-add-support-for-iommu-mapper.patch
new file mode 100644
index 0000000000000..04f0cc75bbabb
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-add-support-for-iommu-mapper.patch
@@ -0,0 +1,902 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:58 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:45 +0900
+Subject: [PATCH v2 28/58] ARM: dma-mapping: add support for IOMMU mapper
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-29-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+This patch adds a complete implementation of the DMA-mapping API for
+devices which have IOMMU support.
+
+This implementation tries to optimize dma address space usage by remapping
+all possible physical memory chunks into a single dma address space chunk.
+
+DMA address space is managed on top of the bitmap stored in the
+dma_iommu_mapping structure stored in device->archdata. Platform setup
+code has to initialize parameters of the dma address space (base address,
+size, allocation precision order) with arm_iommu_create_mapping() function.
+To reduce the size of the bitmap, all allocations are aligned to the
+specified order of base 4 KiB pages.
+
+dma_alloc_* functions allocate physical memory in chunks, each with
+alloc_pages() function to avoid failing if the physical memory gets
+fragmented. In worst case the allocated buffer is composed of 4 KiB page
+chunks.
+
+dma_map_sg() function minimizes the total number of dma address space
+chunks by merging of physical memory chunks into one larger dma address
+space chunk. If requested chunk (scatter list entry) boundaries
+match physical page boundaries, most calls to dma_map_sg() requests will
+result in creating only one chunk in dma address space.
+
+dma_map_page() simply creates a mapping for the given page(s) in the dma
+address space.
+
+All dma functions also perform required cache operation like their
+counterparts from the arm linear physical memory mapping version.
+
+This patch contains code and fixes kindly provided by:
+- Krishna Reddy <vdumpa@nvidia.com>,
+- Andrzej Pietrasiewicz <andrzej.p@samsung.com>,
+- Hiroshi DOYU <hdoyu@nvidia.com>
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
+Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+Tested-By: Subash Patel <subash.ramaswamy@linaro.org>
+(cherry picked from commit 4ce63fcd919c32d22528e54dcd89506962933719)
+
+Conflicts:
+
+ arch/arm/mm/dma-mapping.c
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ arch/arm/Kconfig | 8 +
+ arch/arm/include/asm/device.h | 3 +
+ arch/arm/include/asm/dma-iommu.h | 34 ++
+ arch/arm/mm/dma-mapping.c | 694 +++++++++++++++++++++++++++++++++++++-
+ arch/arm/mm/vmregion.h | 2 +-
+ 5 files changed, 736 insertions(+), 5 deletions(-)
+ create mode 100644 arch/arm/include/asm/dma-iommu.h
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 9bc6663..c7a542c 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -48,6 +48,14 @@ config ARM
+ config ARM_HAS_SG_CHAIN
+ bool
+
++config NEED_SG_DMA_LENGTH
++ bool
++
++config ARM_DMA_USE_IOMMU
++ select NEED_SG_DMA_LENGTH
++ select ARM_HAS_SG_CHAIN
++ bool
++
+ config HAVE_PWM
+ bool
+
+diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
+index 6e2cb0e..b69c0d3 100644
+--- a/arch/arm/include/asm/device.h
++++ b/arch/arm/include/asm/device.h
+@@ -14,6 +14,9 @@ struct dev_archdata {
+ #ifdef CONFIG_IOMMU_API
+ void *iommu; /* private IOMMU data */
+ #endif
++#ifdef CONFIG_ARM_DMA_USE_IOMMU
++ struct dma_iommu_mapping *mapping;
++#endif
+ };
+
+ struct omap_device;
+diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
+new file mode 100644
+index 0000000..799b094
+--- /dev/null
++++ b/arch/arm/include/asm/dma-iommu.h
+@@ -0,0 +1,34 @@
++#ifndef ASMARM_DMA_IOMMU_H
++#define ASMARM_DMA_IOMMU_H
++
++#ifdef __KERNEL__
++
++#include <linux/mm_types.h>
++#include <linux/scatterlist.h>
++#include <linux/dma-debug.h>
++#include <linux/kmemcheck.h>
++
++struct dma_iommu_mapping {
++ /* iommu specific data */
++ struct iommu_domain *domain;
++
++ void *bitmap;
++ size_t bits;
++ unsigned int order;
++ dma_addr_t base;
++
++ spinlock_t lock;
++ struct kref kref;
++};
++
++struct dma_iommu_mapping *
++arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
++ int order);
++
++void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
++
++int arm_iommu_attach_device(struct device *dev,
++ struct dma_iommu_mapping *mapping);
++
++#endif /* __KERNEL__ */
++#endif
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index ee4cb48..ea6b431 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -21,6 +21,8 @@
+ #include <linux/highmem.h>
+ #include <linux/memblock.h>
+ #include <linux/slab.h>
++#include <linux/iommu.h>
++#include <linux/vmalloc.h>
+
+ #include <asm/memory.h>
+ #include <asm/highmem.h>
+@@ -28,6 +30,7 @@
+ #include <asm/tlbflush.h>
+ #include <asm/sizes.h>
+ #include <asm/mach/arch.h>
++#include <asm/dma-iommu.h>
+ #include <asm/mach/map.h>
+ #include <asm/system_info.h>
+ #include <asm/dma-contiguous.h>
+@@ -168,9 +171,11 @@ static void __dma_clear_buffer(struct page *page, size_t size)
+ * lurking in the kernel direct-mapped region is invalidated.
+ */
+ ptr = page_address(page);
+- memset(ptr, 0, size);
+- dmac_flush_range(ptr, ptr + size);
+- outer_flush_range(__pa(ptr), __pa(ptr) + size);
++ if (ptr) {
++ memset(ptr, 0, size);
++ dmac_flush_range(ptr, ptr + size);
++ outer_flush_range(__pa(ptr), __pa(ptr) + size);
++ }
+ }
+
+ /*
+@@ -263,8 +268,10 @@ static int __init consistent_init(void)
+ unsigned long base = consistent_base;
+ unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
+
++#ifndef CONFIG_ARM_DMA_USE_IOMMU
+ if (cpu_architecture() >= CPU_ARCH_ARMv6)
+ return 0;
++#endif
+
+ consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
+ if (!consistent_pte) {
+@@ -437,7 +444,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
+ u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+
+ pte = consistent_pte[idx] + off;
+- c->vm_pages = page;
++ c->priv = page;
+
+ do {
+ BUG_ON(!pte_none(*pte));
+@@ -889,6 +896,9 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ int i, j;
+
+ for_each_sg(sg, s, nents, i) {
++#ifdef CONFIG_NEED_SG_DMA_LENGTH
++ s->dma_length = s->length;
++#endif
+ s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
+ s->length, dir, attrs);
+ if (dma_mapping_error(dev, s->dma_address))
+@@ -997,3 +1007,679 @@ static int __init dma_debug_do_init(void)
+ return 0;
+ }
+ fs_initcall(dma_debug_do_init);
++
++#ifdef CONFIG_ARM_DMA_USE_IOMMU
++
++/* IOMMU */
++
++static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
++ size_t size)
++{
++ unsigned int order = get_order(size);
++ unsigned int align = 0;
++ unsigned int count, start;
++ unsigned long flags;
++
++ count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
++ (1 << mapping->order) - 1) >> mapping->order;
++
++ if (order > mapping->order)
++ align = (1 << (order - mapping->order)) - 1;
++
++ spin_lock_irqsave(&mapping->lock, flags);
++ start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
++ count, align);
++ if (start > mapping->bits) {
++ spin_unlock_irqrestore(&mapping->lock, flags);
++ return DMA_ERROR_CODE;
++ }
++
++ bitmap_set(mapping->bitmap, start, count);
++ spin_unlock_irqrestore(&mapping->lock, flags);
++
++ return mapping->base + (start << (mapping->order + PAGE_SHIFT));
++}
++
++static inline void __free_iova(struct dma_iommu_mapping *mapping,
++ dma_addr_t addr, size_t size)
++{
++ unsigned int start = (addr - mapping->base) >>
++ (mapping->order + PAGE_SHIFT);
++ unsigned int count = ((size >> PAGE_SHIFT) +
++ (1 << mapping->order) - 1) >> mapping->order;
++ unsigned long flags;
++
++ spin_lock_irqsave(&mapping->lock, flags);
++ bitmap_clear(mapping->bitmap, start, count);
++ spin_unlock_irqrestore(&mapping->lock, flags);
++}
++
++static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
++{
++ struct page **pages;
++ int count = size >> PAGE_SHIFT;
++ int array_size = count * sizeof(struct page *);
++ int i = 0;
++
++ if (array_size <= PAGE_SIZE)
++ pages = kzalloc(array_size, gfp);
++ else
++ pages = vzalloc(array_size);
++ if (!pages)
++ return NULL;
++
++ while (count) {
++ int j, order = __ffs(count);
++
++ pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
++ while (!pages[i] && order)
++ pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
++ if (!pages[i])
++ goto error;
++
++ if (order)
++ split_page(pages[i], order);
++ j = 1 << order;
++ while (--j)
++ pages[i + j] = pages[i] + j;
++
++ __dma_clear_buffer(pages[i], PAGE_SIZE << order);
++ i += 1 << order;
++ count -= 1 << order;
++ }
++
++ return pages;
++error:
++ while (--i)
++ if (pages[i])
++ __free_pages(pages[i], 0);
++ if (array_size < PAGE_SIZE)
++ kfree(pages);
++ else
++ vfree(pages);
++ return NULL;
++}
++
++static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
++{
++ int count = size >> PAGE_SHIFT;
++ int array_size = count * sizeof(struct page *);
++ int i;
++ for (i = 0; i < count; i++)
++ if (pages[i])
++ __free_pages(pages[i], 0);
++ if (array_size < PAGE_SIZE)
++ kfree(pages);
++ else
++ vfree(pages);
++ return 0;
++}
++
++/*
++ * Create a CPU mapping for a specified pages
++ */
++static void *
++__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
++{
++ struct arm_vmregion *c;
++ size_t align;
++ size_t count = size >> PAGE_SHIFT;
++ int bit;
++
++ if (!consistent_pte[0]) {
++ pr_err("%s: not initialised\n", __func__);
++ dump_stack();
++ return NULL;
++ }
++
++ /*
++ * Align the virtual region allocation - maximum alignment is
++ * a section size, minimum is a page size. This helps reduce
++ * fragmentation of the DMA space, and also prevents allocations
++ * smaller than a section from crossing a section boundary.
++ */
++ bit = fls(size - 1);
++ if (bit > SECTION_SHIFT)
++ bit = SECTION_SHIFT;
++ align = 1 << bit;
++
++ /*
++ * Allocate a virtual address in the consistent mapping region.
++ */
++ c = arm_vmregion_alloc(&consistent_head, align, size,
++ gfp & ~(__GFP_DMA | __GFP_HIGHMEM), NULL);
++ if (c) {
++ pte_t *pte;
++ int idx = CONSISTENT_PTE_INDEX(c->vm_start);
++ int i = 0;
++ u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
++
++ pte = consistent_pte[idx] + off;
++ c->priv = pages;
++
++ do {
++ BUG_ON(!pte_none(*pte));
++
++ set_pte_ext(pte, mk_pte(pages[i], prot), 0);
++ pte++;
++ off++;
++ i++;
++ if (off >= PTRS_PER_PTE) {
++ off = 0;
++ pte = consistent_pte[++idx];
++ }
++ } while (i < count);
++
++ dsb();
++
++ return (void *)c->vm_start;
++ }
++ return NULL;
++}
++
++/*
++ * Create a mapping in device IO address space for specified pages
++ */
++static dma_addr_t
++__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
++{
++ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
++ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
++ dma_addr_t dma_addr, iova;
++ int i, ret = DMA_ERROR_CODE;
++
++ dma_addr = __alloc_iova(mapping, size);
++ if (dma_addr == DMA_ERROR_CODE)
++ return dma_addr;
++
++ iova = dma_addr;
++ for (i = 0; i < count; ) {
++ unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
++ phys_addr_t phys = page_to_phys(pages[i]);
++ unsigned int len, j;
++
++ for (j = i + 1; j < count; j++, next_pfn++)
++ if (page_to_pfn(pages[j]) != next_pfn)
++ break;
++
++ len = (j - i) << PAGE_SHIFT;
++ ret = iommu_map(mapping->domain, iova, phys, len, 0);
++ if (ret < 0)
++ goto fail;
++ iova += len;
++ i = j;
++ }
++ return dma_addr;
++fail:
++ iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
++ __free_iova(mapping, dma_addr, size);
++ return DMA_ERROR_CODE;
++}
++
++static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
++{
++ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
++
++ /*
++ * add optional in-page offset from iova to size and align
++ * result to page size
++ */
++ size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
++ iova &= PAGE_MASK;
++
++ iommu_unmap(mapping->domain, iova, size);
++ __free_iova(mapping, iova, size);
++ return 0;
++}
++
++static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
++ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
++{
++ pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
++ struct page **pages;
++ void *addr = NULL;
++
++ *handle = DMA_ERROR_CODE;
++ size = PAGE_ALIGN(size);
++
++ pages = __iommu_alloc_buffer(dev, size, gfp);
++ if (!pages)
++ return NULL;
++
++ *handle = __iommu_create_mapping(dev, pages, size);
++ if (*handle == DMA_ERROR_CODE)
++ goto err_buffer;
++
++ addr = __iommu_alloc_remap(pages, size, gfp, prot);
++ if (!addr)
++ goto err_mapping;
++
++ return addr;
++
++err_mapping:
++ __iommu_remove_mapping(dev, *handle, size);
++err_buffer:
++ __iommu_free_buffer(dev, pages, size);
++ return NULL;
++}
++
++static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t dma_addr, size_t size,
++ struct dma_attrs *attrs)
++{
++ struct arm_vmregion *c;
++
++ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
++ c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
++
++ if (c) {
++ struct page **pages = c->priv;
++
++ unsigned long uaddr = vma->vm_start;
++ unsigned long usize = vma->vm_end - vma->vm_start;
++ int i = 0;
++
++ do {
++ int ret;
++
++ ret = vm_insert_page(vma, uaddr, pages[i++]);
++ if (ret) {
++ pr_err("Remapping memory, error: %d\n", ret);
++ return ret;
++ }
++
++ uaddr += PAGE_SIZE;
++ usize -= PAGE_SIZE;
++ } while (usize > 0);
++ }
++ return 0;
++}
++
++/*
++ * free a page as defined by the above mapping.
++ * Must not be called with IRQs disabled.
++ */
++void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
++ dma_addr_t handle, struct dma_attrs *attrs)
++{
++ struct arm_vmregion *c;
++ size = PAGE_ALIGN(size);
++
++ c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
++ if (c) {
++ struct page **pages = c->priv;
++ __dma_free_remap(cpu_addr, size);
++ __iommu_remove_mapping(dev, handle, size);
++ __iommu_free_buffer(dev, pages, size);
++ }
++}
++
++/*
++ * Map a part of the scatter-gather list into contiguous io address space
++ */
++static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
++ size_t size, dma_addr_t *handle,
++ enum dma_data_direction dir)
++{
++ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
++ dma_addr_t iova, iova_base;
++ int ret = 0;
++ unsigned int count;
++ struct scatterlist *s;
++
++ size = PAGE_ALIGN(size);
++ *handle = DMA_ERROR_CODE;
++
++ iova_base = iova = __alloc_iova(mapping, size);
++ if (iova == DMA_ERROR_CODE)
++ return -ENOMEM;
++
++ for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
++ phys_addr_t phys = page_to_phys(sg_page(s));
++ unsigned int len = PAGE_ALIGN(s->offset + s->length);
++
++ if (!arch_is_coherent())
++ __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
++
++ ret = iommu_map(mapping->domain, iova, phys, len, 0);
++ if (ret < 0)
++ goto fail;
++ count += len >> PAGE_SHIFT;
++ iova += len;
++ }
++ *handle = iova_base;
++
++ return 0;
++fail:
++ iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
++ __free_iova(mapping, iova_base, size);
++ return ret;
++}
++
++/**
++ * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
++ * @dev: valid struct device pointer
++ * @sg: list of buffers
++ * @nents: number of buffers to map
++ * @dir: DMA transfer direction
++ *
++ * Map a set of buffers described by scatterlist in streaming mode for DMA.
++ * The scatter gather list elements are merged together (if possible) and
++ * tagged with the appropriate dma address and length. They are obtained via
++ * sg_dma_{address,length}.
++ */
++int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
++ enum dma_data_direction dir, struct dma_attrs *attrs)
++{
++ struct scatterlist *s = sg, *dma = sg, *start = sg;
++ int i, count = 0;
++ unsigned int offset = s->offset;
++ unsigned int size = s->offset + s->length;
++ unsigned int max = dma_get_max_seg_size(dev);
++
++ for (i = 1; i < nents; i++) {
++ s = sg_next(s);
++
++ s->dma_address = DMA_ERROR_CODE;
++ s->dma_length = 0;
++
++ if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
++ if (__map_sg_chunk(dev, start, size, &dma->dma_address,
++ dir) < 0)
++ goto bad_mapping;
++
++ dma->dma_address += offset;
++ dma->dma_length = size - offset;
++
++ size = offset = s->offset;
++ start = s;
++ dma = sg_next(dma);
++ count += 1;
++ }
++ size += s->length;
++ }
++ if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0)
++ goto bad_mapping;
++
++ dma->dma_address += offset;
++ dma->dma_length = size - offset;
++
++ return count+1;
++
++bad_mapping:
++ for_each_sg(sg, s, count, i)
++ __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
++ return 0;
++}
++
++/**
++ * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
++ * @dev: valid struct device pointer
++ * @sg: list of buffers
++ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
++ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
++ *
++ * Unmap a set of streaming mode DMA translations. Again, CPU access
++ * rules concerning calls here are the same as for dma_unmap_single().
++ */
++void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
++ enum dma_data_direction dir, struct dma_attrs *attrs)
++{
++ struct scatterlist *s;
++ int i;
++
++ for_each_sg(sg, s, nents, i) {
++ if (sg_dma_len(s))
++ __iommu_remove_mapping(dev, sg_dma_address(s),
++ sg_dma_len(s));
++ if (!arch_is_coherent())
++ __dma_page_dev_to_cpu(sg_page(s), s->offset,
++ s->length, dir);
++ }
++}
++
++/**
++ * arm_iommu_sync_sg_for_cpu
++ * @dev: valid struct device pointer
++ * @sg: list of buffers
++ * @nents: number of buffers to map (returned from dma_map_sg)
++ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
++ */
++void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
++ int nents, enum dma_data_direction dir)
++{
++ struct scatterlist *s;
++ int i;
++
++ for_each_sg(sg, s, nents, i)
++ if (!arch_is_coherent())
++ __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
++
++}
++
++/**
++ * arm_iommu_sync_sg_for_device
++ * @dev: valid struct device pointer
++ * @sg: list of buffers
++ * @nents: number of buffers to map (returned from dma_map_sg)
++ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
++ */
++void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
++ int nents, enum dma_data_direction dir)
++{
++ struct scatterlist *s;
++ int i;
++
++ for_each_sg(sg, s, nents, i)
++ if (!arch_is_coherent())
++ __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
++}
++
++
++/**
++ * arm_iommu_map_page
++ * @dev: valid struct device pointer
++ * @page: page that buffer resides in
++ * @offset: offset into page for start of buffer
++ * @size: size of buffer to map
++ * @dir: DMA transfer direction
++ *
++ * IOMMU aware version of arm_dma_map_page()
++ */
++static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
++ dma_addr_t dma_addr;
++ int ret, len = PAGE_ALIGN(size + offset);
++
++ if (!arch_is_coherent())
++ __dma_page_cpu_to_dev(page, offset, size, dir);
++
++ dma_addr = __alloc_iova(mapping, len);
++ if (dma_addr == DMA_ERROR_CODE)
++ return dma_addr;
++
++ ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
++ if (ret < 0)
++ goto fail;
++
++ return dma_addr + offset;
++fail:
++ __free_iova(mapping, dma_addr, len);
++ return DMA_ERROR_CODE;
++}
++
++/**
++ * arm_iommu_unmap_page
++ * @dev: valid struct device pointer
++ * @handle: DMA address of buffer
++ * @size: size of buffer (same as passed to dma_map_page)
++ * @dir: DMA transfer direction (same as passed to dma_map_page)
++ *
++ * IOMMU aware version of arm_dma_unmap_page()
++ */
++static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
++ size_t size, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
++ dma_addr_t iova = handle & PAGE_MASK;
++ struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
++ int offset = handle & ~PAGE_MASK;
++ int len = PAGE_ALIGN(size + offset);
++
++ if (!iova)
++ return;
++
++ if (!arch_is_coherent())
++ __dma_page_dev_to_cpu(page, offset, size, dir);
++
++ iommu_unmap(mapping->domain, iova, len);
++ __free_iova(mapping, iova, len);
++}
++
++static void arm_iommu_sync_single_for_cpu(struct device *dev,
++ dma_addr_t handle, size_t size, enum dma_data_direction dir)
++{
++ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
++ dma_addr_t iova = handle & PAGE_MASK;
++ struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
++ unsigned int offset = handle & ~PAGE_MASK;
++
++ if (!iova)
++ return;
++
++ if (!arch_is_coherent())
++ __dma_page_dev_to_cpu(page, offset, size, dir);
++}
++
++static void arm_iommu_sync_single_for_device(struct device *dev,
++ dma_addr_t handle, size_t size, enum dma_data_direction dir)
++{
++ struct dma_iommu_mapping *mapping = dev->archdata.mapping;
++ dma_addr_t iova = handle & PAGE_MASK;
++ struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
++ unsigned int offset = handle & ~PAGE_MASK;
++
++ if (!iova)
++ return;
++
++ __dma_page_cpu_to_dev(page, offset, size, dir);
++}
++
++struct dma_map_ops iommu_ops = {
++ .alloc = arm_iommu_alloc_attrs,
++ .free = arm_iommu_free_attrs,
++ .mmap = arm_iommu_mmap_attrs,
++
++ .map_page = arm_iommu_map_page,
++ .unmap_page = arm_iommu_unmap_page,
++ .sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
++ .sync_single_for_device = arm_iommu_sync_single_for_device,
++
++ .map_sg = arm_iommu_map_sg,
++ .unmap_sg = arm_iommu_unmap_sg,
++ .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
++ .sync_sg_for_device = arm_iommu_sync_sg_for_device,
++};
++
++/**
++ * arm_iommu_create_mapping
++ * @bus: pointer to the bus holding the client device (for IOMMU calls)
++ * @base: start address of the valid IO address space
++ * @size: size of the valid IO address space
++ * @order: accuracy of the IO addresses allocations
++ *
++ * Creates a mapping structure which holds information about used/unused
++ * IO address ranges, which is required to perform memory allocation and
++ * mapping with IOMMU aware functions.
++ *
++ * The client device need to be attached to the mapping with
++ * arm_iommu_attach_device function.
++ */
++struct dma_iommu_mapping *
++arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
++ int order)
++{
++ unsigned int count = size >> (PAGE_SHIFT + order);
++ unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
++ struct dma_iommu_mapping *mapping;
++ int err = -ENOMEM;
++
++ if (!count)
++ return ERR_PTR(-EINVAL);
++
++ mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
++ if (!mapping)
++ goto err;
++
++ mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
++ if (!mapping->bitmap)
++ goto err2;
++
++ mapping->base = base;
++ mapping->bits = BITS_PER_BYTE * bitmap_size;
++ mapping->order = order;
++ spin_lock_init(&mapping->lock);
++
++ mapping->domain = iommu_domain_alloc(bus);
++ if (!mapping->domain)
++ goto err3;
++
++ kref_init(&mapping->kref);
++ return mapping;
++err3:
++ kfree(mapping->bitmap);
++err2:
++ kfree(mapping);
++err:
++ return ERR_PTR(err);
++}
++
++static void release_iommu_mapping(struct kref *kref)
++{
++ struct dma_iommu_mapping *mapping =
++ container_of(kref, struct dma_iommu_mapping, kref);
++
++ iommu_domain_free(mapping->domain);
++ kfree(mapping->bitmap);
++ kfree(mapping);
++}
++
++void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
++{
++ if (mapping)
++ kref_put(&mapping->kref, release_iommu_mapping);
++}
++
++/**
++ * arm_iommu_attach_device
++ * @dev: valid struct device pointer
++ * @mapping: io address space mapping structure (returned from
++ * arm_iommu_create_mapping)
++ *
++ * Attaches specified io address space mapping to the provided device,
++ * this replaces the dma operations (dma_map_ops pointer) with the
++ * IOMMU aware version. More than one client might be attached to
++ * the same io address space mapping.
++ */
++int arm_iommu_attach_device(struct device *dev,
++ struct dma_iommu_mapping *mapping)
++{
++ int err;
++
++ err = iommu_attach_device(mapping->domain, dev);
++ if (err)
++ return err;
++
++ kref_get(&mapping->kref);
++ dev->archdata.mapping = mapping;
++ set_dma_ops(dev, &iommu_ops);
++
++ pr_info("Attached IOMMU controller to %s device.\n", dev_name(dev));
++ return 0;
++}
++
++#endif
+diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
+index 162be66..bf312c3 100644
+--- a/arch/arm/mm/vmregion.h
++++ b/arch/arm/mm/vmregion.h
+@@ -17,7 +17,7 @@ struct arm_vmregion {
+ struct list_head vm_list;
+ unsigned long vm_start;
+ unsigned long vm_end;
+- struct page *vm_pages;
++ void *priv;
+ int vm_active;
+ const void *caller;
+ };
+--
+1.7.5.4
+
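The IOMMU mapping API added above is driven from platform or bus glue code: arm_iommu_create_mapping() reserves an IO virtual address range, and arm_iommu_attach_device() switches a client device over to the IOMMU-aware dma_map_ops. A rough usage sketch (the base address, size and helper name are illustrative assumptions, not taken from this series):

    #include <linux/err.h>
    #include <linux/platform_device.h>
    #include <asm/sizes.h>
    #include <asm/dma-iommu.h>

    /* Hypothetical helper: give one platform device an IOMMU-backed DMA space. */
    static int example_setup_iommu_dma(struct device *dev)
    {
            struct dma_iommu_mapping *mapping;

            /* 128 MiB of IO virtual address space at 0x80000000, 4 KiB granularity (order 0). */
            mapping = arm_iommu_create_mapping(&platform_bus_type, 0x80000000,
                                               SZ_128M, 0);
            if (IS_ERR(mapping))
                    return PTR_ERR(mapping);

            /* Replaces dev's dma_map_ops with iommu_ops and takes a reference on the mapping. */
            return arm_iommu_attach_device(dev, mapping);
    }

Several devices may be attached to the same mapping; arm_iommu_release_mapping() drops the reference when the mapping is no longer needed.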
diff --git a/patches.dma-mapping/arm-dma-mapping-atomic_pool-with-struct-page-pages.patch b/patches.dma-mapping/arm-dma-mapping-atomic_pool-with-struct-page-pages.patch
new file mode 100644
index 0000000000000..45fcb26a2a92d
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-atomic_pool-with-struct-page-pages.patch
@@ -0,0 +1,90 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:54:10 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:51:14 +0900
+Subject: [PATCH v2 57/58] ARM: dma-mapping: atomic_pool with struct page **pages
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-58-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Hiroshi Doyu <hdoyu@nvidia.com>
+
+struct page **pages is necessary to align with the non-atomic path in
+__iommu_get_pages(). atomic_pool() has the initialized **pages instead
+of just *page.
+
+Signed-off-by: Hiroshi Doyu <hdoyu@nvidia.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit 6b3fe47264262fa082897ebe8ae01041eae65e14)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+---
+ arch/arm/mm/dma-mapping.c | 17 ++++++++++++++---
+ 1 files changed, 14 insertions(+), 3 deletions(-)
+
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index dc16881..ba294a3 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -274,7 +274,7 @@ struct dma_pool {
+ unsigned long *bitmap;
+ unsigned long nr_pages;
+ void *vaddr;
+- struct page *page;
++ struct page **pages;
+ };
+
+ static struct dma_pool atomic_pool = {
+@@ -313,6 +313,7 @@ static int __init atomic_pool_init(void)
+ unsigned long nr_pages = pool->size >> PAGE_SHIFT;
+ unsigned long *bitmap;
+ struct page *page;
++ struct page **pages;
+ void *ptr;
+ int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
+
+@@ -320,21 +321,31 @@ static int __init atomic_pool_init(void)
+ if (!bitmap)
+ goto no_bitmap;
+
++ pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
++ if (!pages)
++ goto no_pages;
++
+ if (IS_ENABLED(CONFIG_CMA))
+ ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
+ else
+ ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
+ &page, NULL);
+ if (ptr) {
++ int i;
++
++ for (i = 0; i < nr_pages; i++)
++ pages[i] = page + i;
++
+ spin_lock_init(&pool->lock);
+ pool->vaddr = ptr;
+- pool->page = page;
++ pool->pages = pages;
+ pool->bitmap = bitmap;
+ pool->nr_pages = nr_pages;
+ pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
+ (unsigned)pool->size / 1024);
+ return 0;
+ }
++no_pages:
+ kfree(bitmap);
+ no_bitmap:
+ pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
+@@ -459,7 +470,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
+ if (pageno < pool->nr_pages) {
+ bitmap_set(pool->bitmap, pageno, count);
+ ptr = pool->vaddr + PAGE_SIZE * pageno;
+- *ret_page = pool->page + pageno;
++ *ret_page = pool->pages[pageno];
+ } else {
+ pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
+ "Please increase it with coherent_pool= kernel parameter!\n",
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/arm-dma-mapping-fix-atomic-allocation-alignment.patch b/patches.dma-mapping/arm-dma-mapping-fix-atomic-allocation-alignment.patch
new file mode 100644
index 0000000000000..39d734d06cc14
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-fix-atomic-allocation-alignment.patch
@@ -0,0 +1,54 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:53 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:51:07 +0900
+Subject: [PATCH v2 50/58] ARM: dma-mapping: fix atomic allocation alignment
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-51-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Aaro Koskinen <aaro.koskinen@iki.fi>
+
+The alignment mask is calculated incorrectly. Fixing the calculation
+makes strange hangs/lockups disappear during the boot with Amstrad E3
+and 3.6-rc1 kernel.
+
+Signed-off-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit e4ea6918c93b9f59d34e8ca2124b2b64b1afe73b)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+---
+ arch/arm/mm/dma-mapping.c | 6 +++---
+ 1 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 1ce34ba..80491d3 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -422,7 +422,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
+ unsigned int pageno;
+ unsigned long flags;
+ void *ptr = NULL;
+- size_t align;
++ unsigned long align_mask;
+
+ if (!pool->vaddr) {
+ WARN(1, "coherent pool not initialised!\n");
+@@ -434,11 +434,11 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
+ * small, so align them to their order in pages, minimum is a page
+ * size. This helps reduce fragmentation of the DMA space.
+ */
+- align = PAGE_SIZE << get_order(size);
++ align_mask = (1 << get_order(size)) - 1;
+
+ spin_lock_irqsave(&pool->lock, flags);
+ pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
+- 0, count, (1 << align) - 1);
++ 0, count, align_mask);
+ if (pageno < pool->nr_pages) {
+ bitmap_set(pool->bitmap, pageno, count);
+ ptr = pool->vaddr + PAGE_SIZE * pageno;
+--
+1.7.5.4
+
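To see the effect of the fix above with 4 KiB pages: for a 32 KiB allocation, get_order(size) is 3, so the old code computed align = PAGE_SIZE << 3 = 32768 and then passed (1 << align) - 1 to bitmap_find_next_zero_area(), an undefined shift by 32768 bits rather than the intended mask. The fixed code passes align_mask = (1 << 3) - 1 = 7, i.e. the eight-page allocation is placed on an eight-page boundary within the pool.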
diff --git a/patches.dma-mapping/arm-dma-mapping-fix-buffer-chunk-allocation-order.patch b/patches.dma-mapping/arm-dma-mapping-fix-buffer-chunk-allocation-order.patch
new file mode 100644
index 0000000000000..1f16839816c48
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-fix-buffer-chunk-allocation-order.patch
@@ -0,0 +1,49 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:48 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:51:05 +0900
+Subject: [PATCH v2 48/58] ARM: dma-mapping: fix buffer chunk allocation order
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-49-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+IOMMU-aware dma_alloc_attrs() implementation allocates buffers in
+power-of-two chunks to improve performance and take advantage of large
+page mappings provided by some IOMMU hardware. However current code, due
+to a subtle bug, allocated those chunks in the smallest-to-largest
+order, which completely killed all the advantages of using larger than
+page chunks. If a 4KiB chunk has been mapped as a first chunk, the
+consecutive chunks are not aligned correctly to the power-of-two which
+match their size and IOMMU drivers were not able to use internal
+mappings of size other than the 4KiB (largest common denominator of
+alignment and chunk size).
+
+This patch fixes this issue by changing to the correct largest-to-smallest
+chunk size allocation sequence.
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit 593f47355467b9ef44293698817e2bdb347e2d11)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+---
+ arch/arm/mm/dma-mapping.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index caf2f3d..1ce34ba 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -940,7 +940,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
+ return NULL;
+
+ while (count) {
+- int j, order = __ffs(count);
++ int j, order = __fls(count);
+
+ pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
+ while (!pages[i] && order)
+--
+1.7.5.4
+
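A concrete illustration of the fix above: for a 19-page buffer, count starts at 19 (binary 10011). With __ffs() the first chunk had order 0 (a single page), so the later 16-page chunk lands at an unaligned IO offset (here 12 KiB) and has to be mapped as sixteen 4 KiB entries. With __fls() the 16-page chunk is allocated and mapped first, while the IO address is still aligned to its size, so an IOMMU that supports 64 KiB mappings can cover it with one entry; the order-1 and order-0 chunks follow.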
diff --git a/patches.dma-mapping/arm-dma-mapping-fix-debug-messages-in-dmabounce-code.patch b/patches.dma-mapping/arm-dma-mapping-fix-debug-messages-in-dmabounce-code.patch
new file mode 100644
index 0000000000000..202a91051f2eb
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-fix-debug-messages-in-dmabounce-code.patch
@@ -0,0 +1,81 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:10 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:50 +0900
+Subject: [PATCH v2 33/58] ARM: dma-mapping: fix debug messages in dmabounce code
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-34-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+This patch fixes the usage of uninitialized variables in dmabounce code
+introduced by commit a227fb92 ('ARM: dma-mapping: remove offset parameter
+to prepare for generic dma_ops'):
+arch/arm/common/dmabounce.c: In function ‘dmabounce_sync_for_device’:
+arch/arm/common/dmabounce.c:409: warning: ‘off’ may be used uninitialized in this function
+arch/arm/common/dmabounce.c:407: note: ‘off’ was declared here
+arch/arm/common/dmabounce.c: In function ‘dmabounce_sync_for_cpu’:
+arch/arm/common/dmabounce.c:369: warning: ‘off’ may be used uninitialized in this function
+arch/arm/common/dmabounce.c:367: note: ‘off’ was declared here
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit fdb1117325ad719dc39e81209bc622d511db70e0)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ arch/arm/common/dmabounce.c | 16 ++++++++--------
+ 1 files changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
+index 9d7eb53..aa07f59 100644
+--- a/arch/arm/common/dmabounce.c
++++ b/arch/arm/common/dmabounce.c
+@@ -366,8 +366,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+ struct safe_buffer *buf;
+ unsigned long off;
+
+- dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
+- __func__, addr, off, sz, dir);
++ dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
++ __func__, addr, sz, dir);
+
+ buf = find_safe_buffer_dev(dev, addr, __func__);
+ if (!buf)
+@@ -377,8 +377,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+
+ BUG_ON(buf->direction != dir);
+
+- dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+- __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
++ dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
++ __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
+ buf->safe, buf->safe_dma_addr);
+
+ DO_STATS(dev->archdata.dmabounce->bounce_count++);
+@@ -406,8 +406,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+ struct safe_buffer *buf;
+ unsigned long off;
+
+- dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
+- __func__, addr, off, sz, dir);
++ dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
++ __func__, addr, sz, dir);
+
+ buf = find_safe_buffer_dev(dev, addr, __func__);
+ if (!buf)
+@@ -417,8 +417,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+
+ BUG_ON(buf->direction != dir);
+
+- dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+- __func__, buf->ptr, virt_to_dma(dev, buf->ptr),
++ dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
++ __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
+ buf->safe, buf->safe_dma_addr);
+
+ DO_STATS(dev->archdata.dmabounce->bounce_count++);
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/arm-dma-mapping-fix-error-path-for-memory-allocation-failure.patch b/patches.dma-mapping/arm-dma-mapping-fix-error-path-for-memory-allocation-failure.patch
new file mode 100644
index 0000000000000..97fec16f8a746
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-fix-error-path-for-memory-allocation-failure.patch
@@ -0,0 +1,40 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:35 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:51:00 +0900
+Subject: [PATCH v2 43/58] ARM: dma-mapping: fix error path for memory allocation failure
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-44-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+This patch fixes an incorrect check in the error path. When the allocation
+of the first page fails, the kernel oopses because the -1 element of the
+pages array is accessed.
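+
+As a side note (not part of the upstream message), the difference between
+the two idioms, with i counting the pages successfully allocated before
+the failure, is:
+
+	/*
+	 * while (--i): for i == 0 the pre-decrement yields -1, which is
+	 *              non-zero, so the body runs and touches pages[-1];
+	 *              for i > 0 the loop stops at 1 and leaks pages[0].
+	 * while (i--): for i == 0 the test fails and nothing runs;
+	 *              for i > 0 it frees pages[i-1] down to pages[0].
+	 */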
+
+Reported-by: Sylwester Nawrocki <s.nawrocki@samsung.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit 9fa8af91f0679f2abbebe1382b937264f3a8b981)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+---
+ arch/arm/mm/dma-mapping.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 6a94d17..caf2f3d 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -961,7 +961,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t
+
+ return pages;
+ error:
+- while (--i)
++ while (i--)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+ if (array_size <= PAGE_SIZE)
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/arm-dma-mapping-fix-incorrect-freeing-of-atomic-allocations.patch b/patches.dma-mapping/arm-dma-mapping-fix-incorrect-freeing-of-atomic-allocations.patch
new file mode 100644
index 0000000000000..aa11ff74aa38c
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-fix-incorrect-freeing-of-atomic-allocations.patch
@@ -0,0 +1,81 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:56 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:51:08 +0900
+Subject: [PATCH v2 51/58] ARM: dma-mapping: fix incorrect freeing of atomic allocations
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-52-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Aaro Koskinen <aaro.koskinen@iki.fi>
+
+Commit e9da6e9905e639b0f842a244bc770b48ad0523e9 (ARM: dma-mapping:
+remove custom consistent dma region) changed the way atomic allocations
+are handled. However, arm_dma_free() was not modified accordingly, and
+as a result freeing of atomic allocations does not work correctly when
+CMA is disabled. Memory is leaked and the following WARNINGs are seen:
+
+[ 57.698911] ------------[ cut here ]------------
+[ 57.753518] WARNING: at arch/arm/mm/dma-mapping.c:263 arm_dma_free+0x88/0xe4()
+[ 57.811473] trying to free invalid coherent area: e0848000
+[ 57.867398] Modules linked in: sata_mv(-)
+[ 57.921373] [<c000d270>] (unwind_backtrace+0x0/0xf0) from [<c0015430>] (warn_slowpath_common+0x50/0x68)
+[ 58.033924] [<c0015430>] (warn_slowpath_common+0x50/0x68) from [<c00154dc>] (warn_slowpath_fmt+0x30/0x40)
+[ 58.152024] [<c00154dc>] (warn_slowpath_fmt+0x30/0x40) from [<c000dc18>] (arm_dma_free+0x88/0xe4)
+[ 58.219592] [<c000dc18>] (arm_dma_free+0x88/0xe4) from [<c008fa30>] (dma_pool_destroy+0x100/0x148)
+[ 58.345526] [<c008fa30>] (dma_pool_destroy+0x100/0x148) from [<c019a64c>] (release_nodes+0x144/0x218)
+[ 58.475782] [<c019a64c>] (release_nodes+0x144/0x218) from [<c0197e10>] (__device_release_driver+0x60/0xb8)
+[ 58.614260] [<c0197e10>] (__device_release_driver+0x60/0xb8) from [<c0198608>] (driver_detach+0xd8/0xec)
+[ 58.756527] [<c0198608>] (driver_detach+0xd8/0xec) from [<c0197c54>] (bus_remove_driver+0x7c/0xc4)
+[ 58.901648] [<c0197c54>] (bus_remove_driver+0x7c/0xc4) from [<c004bfac>] (sys_delete_module+0x19c/0x220)
+[ 59.051447] [<c004bfac>] (sys_delete_module+0x19c/0x220) from [<c0009140>] (ret_fast_syscall+0x0/0x2c)
+[ 59.207996] ---[ end trace 0745420412c0325a ]---
+[ 59.287110] ------------[ cut here ]------------
+[ 59.366324] WARNING: at arch/arm/mm/dma-mapping.c:263 arm_dma_free+0x88/0xe4()
+[ 59.450511] trying to free invalid coherent area: e0847000
+[ 59.534357] Modules linked in: sata_mv(-)
+[ 59.616785] [<c000d270>] (unwind_backtrace+0x0/0xf0) from [<c0015430>] (warn_slowpath_common+0x50/0x68)
+[ 59.790030] [<c0015430>] (warn_slowpath_common+0x50/0x68) from [<c00154dc>] (warn_slowpath_fmt+0x30/0x40)
+[ 59.972322] [<c00154dc>] (warn_slowpath_fmt+0x30/0x40) from [<c000dc18>] (arm_dma_free+0x88/0xe4)
+[ 60.070701] [<c000dc18>] (arm_dma_free+0x88/0xe4) from [<c008fa30>] (dma_pool_destroy+0x100/0x148)
+[ 60.256817] [<c008fa30>] (dma_pool_destroy+0x100/0x148) from [<c019a64c>] (release_nodes+0x144/0x218)
+[ 60.445201] [<c019a64c>] (release_nodes+0x144/0x218) from [<c0197e10>] (__device_release_driver+0x60/0xb8)
+[ 60.634148] [<c0197e10>] (__device_release_driver+0x60/0xb8) from [<c0198608>] (driver_detach+0xd8/0xec)
+[ 60.823623] [<c0198608>] (driver_detach+0xd8/0xec) from [<c0197c54>] (bus_remove_driver+0x7c/0xc4)
+[ 61.013268] [<c0197c54>] (bus_remove_driver+0x7c/0xc4) from [<c004bfac>] (sys_delete_module+0x19c/0x220)
+[ 61.203472] [<c004bfac>] (sys_delete_module+0x19c/0x220) from [<c0009140>] (ret_fast_syscall+0x0/0x2c)
+[ 61.393390] ---[ end trace 0745420412c0325b ]---
+
+The patch fixes this.
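+
+A minimal driver-side sketch of how such a buffer is produced and freed
+(illustrative only; the device, size and flags are assumptions, not taken
+from the report above):
+
+	void *cpu;
+	dma_addr_t handle;
+
+	/* In atomic context this is served from the preallocated atomic
+	 * pool, regardless of whether CONFIG_CMA is enabled. */
+	cpu = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_ATOMIC);
+
+	/* arm_dma_free() must therefore try __free_from_pool() before the
+	 * !CONFIG_CMA remap path, otherwise pool memory ends up in
+	 * __dma_free_remap() and triggers the WARNING above. */
+	dma_free_coherent(dev, SZ_4K, cpu, handle);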
+
+Signed-off-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit d9e0d149b5dcc2ef4688afc572b9906bcda941ef)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+---
+ arch/arm/mm/dma-mapping.c | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 80491d3..258da10 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -647,12 +647,12 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+
+ if (arch_is_coherent() || nommu()) {
+ __dma_free_buffer(page, size);
++ } else if (__free_from_pool(cpu_addr, size)) {
++ return;
+ } else if (!IS_ENABLED(CONFIG_CMA)) {
+ __dma_free_remap(cpu_addr, size);
+ __dma_free_buffer(page, size);
+ } else {
+- if (__free_from_pool(cpu_addr, size))
+- return;
+ /*
+ * Non-atomic allocations cannot be freed with IRQs disabled
+ */
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/arm-dma-mapping-fix-potential-memory-leak-in-atomic_pool_init.patch b/patches.dma-mapping/arm-dma-mapping-fix-potential-memory-leak-in-atomic_pool_init.patch
new file mode 100644
index 0000000000000..a65709edda9d7
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-fix-potential-memory-leak-in-atomic_pool_init.patch
@@ -0,0 +1,37 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:54:13 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:51:15 +0900
+Subject: [PATCH v2 58/58] ARM: dma-mapping: Fix potential memory leak in atomic_pool_init()
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-59-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Sachin Kamat <sachin.kamat@linaro.org>
+
+When either __alloc_from_contiguous or __alloc_remap_buffer fails
+to provide a valid pointer, the allocated memory is freed up and an error
+is returned. 'pages', however, was not freed before returning the error.
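+
+For reference, the resulting unwind order, sketched with the names used in
+atomic_pool_init() (simplified, not the literal function body):
+
+	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!bitmap)
+		goto no_bitmap;
+	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+	if (!pages)
+		goto no_pages;
+	ptr = __alloc_from_contiguous(...);	/* or __alloc_remap_buffer() */
+	if (ptr)
+		return 0;		/* pool set up; keep both arrays */
+	kfree(pages);			/* the line added by this patch */
+no_pages:
+	kfree(bitmap);
+no_bitmap:
+	return -ENOMEM;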
+
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Marek Szyprowski <m.szyprowski@samsung.com>
+Signed-off-by: Sachin Kamat <sachin.kamat@linaro.org>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit ec10665cbf271fb1f60daeb194ad4f2cdcdc59d9)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+---
+ arch/arm/mm/dma-mapping.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -345,6 +345,8 @@ static int __init atomic_pool_init(void)
+ (unsigned)pool->size / 1024);
+ return 0;
+ }
++
++ kfree(pages);
+ no_pages:
+ kfree(bitmap);
+ no_bitmap:
diff --git a/patches.dma-mapping/arm-dma-mapping-implement-dma-sg-methods-on-top-of-any-generic-dma-ops.patch b/patches.dma-mapping/arm-dma-mapping-implement-dma-sg-methods-on-top-of-any-generic-dma-ops.patch
new file mode 100644
index 0000000000000..bf649730b2c4e
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-implement-dma-sg-methods-on-top-of-any-generic-dma-ops.patch
@@ -0,0 +1,153 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:47 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:41 +0900
+Subject: [PATCH v2 24/58] ARM: dma-mapping: implement dma sg methods on top of any generic dma ops
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-25-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+This patch converts all dma_sg methods to be generic (independent of the
+current DMA mapping implementation for the ARM architecture). All dma sg
+operations are now implemented on top of the respective
+dma_map_page/dma_sync_single_for* operations from the dma_map_ops structure.
+
+Before this patch there were custom methods for all scatter/gather
+related operations. They iterated over the whole scatter list and called
+cache related operations directly (which in turn checked whether the dma
+bounce code is in use and called the respective version). This patch changes
+them not to use such a shortcut. Instead it provides a similar loop over
+the scatter list and calls the methods from the device's dma_map_ops
+structure. This enables device dependent implementations of the cache
+related operations (direct linear or dma bounce) to be used, depending on
+the provided dma_map_ops structure.
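+
+Caller-side sketch (illustrative, not part of the commit): drivers keep
+using the generic scatterlist API, and the per-device dma_map_ops now
+selects which page-level implementation runs for each entry:
+
+	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
+	if (mapped == 0)
+		return -ENOMEM;	/* ops->map_page failed for some entry */
+	/* ... perform the DMA ... */
+	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);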
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
+Tested-By: Subash Patel <subash.ramaswamy@linaro.org>
+(cherry picked from commit 2a550e73d3e5f040a3e8eb733c942ab352eafb36)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ arch/arm/mm/dma-mapping.c | 43 +++++++++++++++++++------------------------
+ 1 files changed, 19 insertions(+), 24 deletions(-)
+
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 4a1f336..5b2b652 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -858,7 +858,7 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+ EXPORT_SYMBOL(___dma_page_dev_to_cpu);
+
+ /**
+- * dma_map_sg - map a set of SG buffers for streaming mode DMA
++ * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map
+@@ -876,12 +876,13 @@ EXPORT_SYMBOL(___dma_page_dev_to_cpu);
+ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+ {
++ struct dma_map_ops *ops = get_dma_ops(dev);
+ struct scatterlist *s;
+ int i, j;
+
+ for_each_sg(sg, s, nents, i) {
+- s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
+- s->length, dir);
++ s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
++ s->length, dir, attrs);
+ if (dma_mapping_error(dev, s->dma_address))
+ goto bad_mapping;
+ }
+@@ -889,12 +890,12 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+
+ bad_mapping:
+ for_each_sg(sg, s, i, j)
+- __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
++ ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
+ return 0;
+ }
+
+ /**
+- * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
++ * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
+@@ -906,15 +907,17 @@ int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction dir, struct dma_attrs *attrs)
+ {
++ struct dma_map_ops *ops = get_dma_ops(dev);
+ struct scatterlist *s;
++
+ int i;
+
+ for_each_sg(sg, s, nents, i)
+- __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
++ ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
+ }
+
+ /**
+- * dma_sync_sg_for_cpu
++ * arm_dma_sync_sg_for_cpu
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+@@ -923,21 +926,17 @@ void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+ void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+ {
++ struct dma_map_ops *ops = get_dma_ops(dev);
+ struct scatterlist *s;
+ int i;
+
+- for_each_sg(sg, s, nents, i) {
+- if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s),
+- sg_dma_len(s), dir))
+- continue;
+-
+- __dma_page_dev_to_cpu(sg_page(s), s->offset,
+- s->length, dir);
+- }
++ for_each_sg(sg, s, nents, i)
++ ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
++ dir);
+ }
+
+ /**
+- * dma_sync_sg_for_device
++ * arm_dma_sync_sg_for_device
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @sg: list of buffers
+ * @nents: number of buffers to map (returned from dma_map_sg)
+@@ -946,17 +945,13 @@ void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+ {
++ struct dma_map_ops *ops = get_dma_ops(dev);
+ struct scatterlist *s;
+ int i;
+
+- for_each_sg(sg, s, nents, i) {
+- if (!dmabounce_sync_for_device(dev, sg_dma_address(s),
+- sg_dma_len(s), dir))
+- continue;
+-
+- __dma_page_cpu_to_dev(sg_page(s), s->offset,
+- s->length, dir);
+- }
++ for_each_sg(sg, s, nents, i)
++ ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
++ dir);
+ }
+
+ /*
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/arm-dma-mapping-introduce-dma_error_code-constant.patch b/patches.dma-mapping/arm-dma-mapping-introduce-dma_error_code-constant.patch
new file mode 100644
index 0000000000000..f2d24fadc18d8
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-introduce-dma_error_code-constant.patch
@@ -0,0 +1,96 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:39 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:38 +0900
+Subject: [PATCH v2 21/58] ARM: dma-mapping: introduce DMA_ERROR_CODE constant
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-22-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+Replace all uses of ~0 with DMA_ERROR_CODE, which should make the code
+easier to read.
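+
+Usage sketch (illustrative): callers are expected to test the returned
+handle with dma_mapping_error() rather than comparing against ~0
+themselves:
+
+	dma_addr_t addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, addr))	/* addr == DMA_ERROR_CODE */
+		return -ENOMEM;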
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
+Tested-By: Subash Patel <subash.ramaswamy@linaro.org>
+(cherry picked from commit 553ac78877242b6d8b591323731df304140d0f99)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ arch/arm/common/dmabounce.c | 6 +++---
+ arch/arm/include/asm/dma-mapping.h | 4 +++-
+ arch/arm/mm/dma-mapping.c | 2 +-
+ 3 files changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
+index 595ecd29..210ad1b 100644
+--- a/arch/arm/common/dmabounce.c
++++ b/arch/arm/common/dmabounce.c
+@@ -254,7 +254,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
+ if (buf == NULL) {
+ dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
+ __func__, ptr);
+- return ~0;
++ return DMA_ERROR_CODE;
+ }
+
+ dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+@@ -320,7 +320,7 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+
+ ret = needs_bounce(dev, dma_addr, size);
+ if (ret < 0)
+- return ~0;
++ return DMA_ERROR_CODE;
+
+ if (ret == 0) {
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+@@ -329,7 +329,7 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+
+ if (PageHighMem(page)) {
+ dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
+- return ~0;
++ return DMA_ERROR_CODE;
+ }
+
+ return map_single(dev, page_address(page) + offset, size, dir);
+diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
+index cb3b7c9..6a838da 100644
+--- a/arch/arm/include/asm/dma-mapping.h
++++ b/arch/arm/include/asm/dma-mapping.h
+@@ -10,6 +10,8 @@
+ #include <asm-generic/dma-coherent.h>
+ #include <asm/memory.h>
+
++#define DMA_ERROR_CODE (~0)
++
+ #ifdef __arch_page_to_dma
+ #error Please update to __arch_pfn_to_dma
+ #endif
+@@ -123,7 +125,7 @@ extern int dma_set_mask(struct device *, u64);
+ */
+ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+ {
+- return dma_addr == ~0;
++ return dma_addr == DMA_ERROR_CODE;
+ }
+
+ /*
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index bb19804..fab24ec 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -584,7 +584,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ */
+ gfp &= ~(__GFP_COMP);
+
+- *handle = ~0;
++ *handle = DMA_ERROR_CODE;
+ size = PAGE_ALIGN(size);
+
+ if (arch_is_coherent() || nommu())
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/arm-dma-mapping-modify-condition-check-while-freeing-pages.patch b/patches.dma-mapping/arm-dma-mapping-modify-condition-check-while-freeing-pages.patch
new file mode 100644
index 0000000000000..62a2fd77b44dd
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-modify-condition-check-while-freeing-pages.patch
@@ -0,0 +1,72 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:20 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:54 +0900
+Subject: [PATCH v2 37/58] ARM: dma-mapping: modify condition check while freeing pages
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-38-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Prathyush K <prathyush.k@samsung.com>
+
+WARNING: at mm/vmalloc.c:1471 __iommu_free_buffer+0xcc/0xd0()
+Trying to vfree() nonexistent vm area (ef095000)
+Modules linked in:
+[<c0015a18>] (unwind_backtrace+0x0/0xfc) from [<c0025a94>] (warn_slowpath_common+0x54/0x64)
+[<c0025a94>] (warn_slowpath_common+0x54/0x64) from [<c0025b38>] (warn_slowpath_fmt+0x30/0x40)
+[<c0025b38>] (warn_slowpath_fmt+0x30/0x40) from [<c0016de0>] (__iommu_free_buffer+0xcc/0xd0)
+[<c0016de0>] (__iommu_free_buffer+0xcc/0xd0) from [<c0229a5c>] (exynos_drm_free_buf+0xe4/0x138)
+[<c0229a5c>] (exynos_drm_free_buf+0xe4/0x138) from [<c022b358>] (exynos_drm_gem_destroy+0x80/0xfc)
+[<c022b358>] (exynos_drm_gem_destroy+0x80/0xfc) from [<c0211230>] (drm_gem_object_free+0x28/0x34)
+[<c0211230>] (drm_gem_object_free+0x28/0x34) from [<c0211bd0>] (drm_gem_object_release_handle+0xcc/0xd8)
+[<c0211bd0>] (drm_gem_object_release_handle+0xcc/0xd8) from [<c01abe10>] (idr_for_each+0x74/0xb8)
+[<c01abe10>] (idr_for_each+0x74/0xb8) from [<c02114e4>] (drm_gem_release+0x1c/0x30)
+[<c02114e4>] (drm_gem_release+0x1c/0x30) from [<c0210ae8>] (drm_release+0x608/0x694)
+[<c0210ae8>] (drm_release+0x608/0x694) from [<c00b75a0>] (fput+0xb8/0x228)
+[<c00b75a0>] (fput+0xb8/0x228) from [<c00b40c4>] (filp_close+0x64/0x84)
+[<c00b40c4>] (filp_close+0x64/0x84) from [<c0029d54>] (put_files_struct+0xe8/0x104)
+[<c0029d54>] (put_files_struct+0xe8/0x104) from [<c002b930>] (do_exit+0x608/0x774)
+[<c002b930>] (do_exit+0x608/0x774) from [<c002bae4>] (do_group_exit+0x48/0xb4)
+[<c002bae4>] (do_group_exit+0x48/0xb4) from [<c002bb60>] (sys_exit_group+0x10/0x18)
+[<c002bb60>] (sys_exit_group+0x10/0x18) from [<c000ee80>] (ret_fast_syscall+0x0/0x30)
+
+This patch modifies the condition used while freeing to match the condition
+used during allocation. This fixes the above warning, which arises when the
+array size is equal to PAGE_SIZE: the allocation is done using kzalloc,
+but the free is done using vfree.
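+
+The allocation/free pairing being restored, sketched (simplified from
+__iommu_alloc_buffer()/__iommu_free_buffer()):
+
+	/* allocation side */
+	if (array_size <= PAGE_SIZE)
+		pages = kzalloc(array_size, gfp);
+	else
+		pages = vzalloc(array_size);
+
+	/* free side: must use the same boundary test, otherwise the
+	 * array_size == PAGE_SIZE case is kzalloc'd but vfree'd */
+	if (array_size <= PAGE_SIZE)
+		kfree(pages);
+	else
+		vfree(pages);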
+
+Signed-off-by: Prathyush K <prathyush.k@samsung.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit 46c87852e99cf8ce97e207b11cde19085837e39c)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ arch/arm/mm/dma-mapping.c | 4 ++--
+ 1 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index d766e42..6f85d3d 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -1091,7 +1091,7 @@ error:
+ while (--i)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+- if (array_size < PAGE_SIZE)
++ if (array_size <= PAGE_SIZE)
+ kfree(pages);
+ else
+ vfree(pages);
+@@ -1106,7 +1106,7 @@ static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t s
+ for (i = 0; i < count; i++)
+ if (pages[i])
+ __free_pages(pages[i], 0);
+- if (array_size < PAGE_SIZE)
++ if (array_size <= PAGE_SIZE)
+ kfree(pages);
+ else
+ vfree(pages);
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/arm-dma-mapping-move-all-dma-bounce-code-to-separate-dma-ops-structure.patch b/patches.dma-mapping/arm-dma-mapping-move-all-dma-bounce-code-to-separate-dma-ops-structure.patch
new file mode 100644
index 0000000000000..a68a411ba6353
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-move-all-dma-bounce-code-to-separate-dma-ops-structure.patch
@@ -0,0 +1,425 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:50 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:42 +0900
+Subject: [PATCH v2 25/58] ARM: dma-mapping: move all dma bounce code to separate dma ops structure
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-26-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+This patch removes the dma bounce hooks from the common dma mapping
+implementation on the ARM architecture and creates a separate set of
+dma_map_ops for dma bounce devices.
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
+Tested-By: Subash Patel <subash.ramaswamy@linaro.org>
+(cherry picked from commit 15237e1f505b3e5c2276f240b01cd2133e110cbc)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ arch/arm/common/dmabounce.c | 62 ++++++++++++++++++-----
+ arch/arm/include/asm/dma-mapping.h | 99 +-----------------------------------
+ arch/arm/mm/dma-mapping.c | 79 +++++++++++++++++++++++++----
+ 3 files changed, 120 insertions(+), 120 deletions(-)
+
+diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
+index 32e9cc6..813c29d 100644
+--- a/arch/arm/common/dmabounce.c
++++ b/arch/arm/common/dmabounce.c
+@@ -308,8 +308,9 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
+ * substitute the safe buffer for the unsafe one.
+ * (basically move the buffer from an unsafe area to a safe one)
+ */
+-dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+- unsigned long offset, size_t size, enum dma_data_direction dir)
++static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
+ {
+ dma_addr_t dma_addr;
+ int ret;
+@@ -324,7 +325,7 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+ return DMA_ERROR_CODE;
+
+ if (ret == 0) {
+- __dma_page_cpu_to_dev(page, offset, size, dir);
++ arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
+ return dma_addr;
+ }
+
+@@ -335,7 +336,6 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+
+ return map_single(dev, page_address(page) + offset, size, dir);
+ }
+-EXPORT_SYMBOL(__dma_map_page);
+
+ /*
+ * see if a mapped address was really a "safe" buffer and if so, copy
+@@ -343,8 +343,8 @@ EXPORT_SYMBOL(__dma_map_page);
+ * the safe buffer. (basically return things back to the way they
+ * should be)
+ */
+-void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+- enum dma_data_direction dir)
++static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
++ enum dma_data_direction dir, struct dma_attrs *attrs)
+ {
+ struct safe_buffer *buf;
+
+@@ -353,16 +353,14 @@ void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+
+ buf = find_safe_buffer_dev(dev, dma_addr, __func__);
+ if (!buf) {
+- __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
+- dma_addr & ~PAGE_MASK, size, dir);
++ arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
+ return;
+ }
+
+ unmap_single(dev, buf, size, dir);
+ }
+-EXPORT_SYMBOL(__dma_unmap_page);
+
+-int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
++static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t sz, enum dma_data_direction dir)
+ {
+ struct safe_buffer *buf;
+@@ -392,9 +390,17 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+ }
+ return 0;
+ }
+-EXPORT_SYMBOL(dmabounce_sync_for_cpu);
+
+-int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
++static void dmabounce_sync_for_cpu(struct device *dev,
++ dma_addr_t handle, size_t size, enum dma_data_direction dir)
++{
++ if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
++ return;
++
++ arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
++}
++
++static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+ size_t sz, enum dma_data_direction dir)
+ {
+ struct safe_buffer *buf;
+@@ -424,7 +430,35 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+ }
+ return 0;
+ }
+-EXPORT_SYMBOL(dmabounce_sync_for_device);
++
++static void dmabounce_sync_for_device(struct device *dev,
++ dma_addr_t handle, size_t size, enum dma_data_direction dir)
++{
++ if (!__dmabounce_sync_for_device(dev, handle, size, dir))
++ return;
++
++ arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
++}
++
++static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
++{
++ if (dev->archdata.dmabounce)
++ return 0;
++
++ return arm_dma_ops.set_dma_mask(dev, dma_mask);
++}
++
++static struct dma_map_ops dmabounce_ops = {
++ .map_page = dmabounce_map_page,
++ .unmap_page = dmabounce_unmap_page,
++ .sync_single_for_cpu = dmabounce_sync_for_cpu,
++ .sync_single_for_device = dmabounce_sync_for_device,
++ .map_sg = arm_dma_map_sg,
++ .unmap_sg = arm_dma_unmap_sg,
++ .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
++ .sync_sg_for_device = arm_dma_sync_sg_for_device,
++ .set_dma_mask = dmabounce_set_mask,
++};
+
+ static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
+ const char *name, unsigned long size)
+@@ -486,6 +520,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
+ #endif
+
+ dev->archdata.dmabounce = device_info;
++ set_dma_ops(dev, &dmabounce_ops);
+
+ dev_info(dev, "dmabounce: registered device\n");
+
+@@ -504,6 +539,7 @@ void dmabounce_unregister_dev(struct device *dev)
+ struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+
+ dev->archdata.dmabounce = NULL;
++ set_dma_ops(dev, NULL);
+
+ if (!device_info) {
+ dev_warn(dev,
+diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
+index 6725a08..7a7c3c7 100644
+--- a/arch/arm/include/asm/dma-mapping.h
++++ b/arch/arm/include/asm/dma-mapping.h
+@@ -85,62 +85,6 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
+ #endif
+
+ /*
+- * The DMA API is built upon the notion of "buffer ownership". A buffer
+- * is either exclusively owned by the CPU (and therefore may be accessed
+- * by it) or exclusively owned by the DMA device. These helper functions
+- * represent the transitions between these two ownership states.
+- *
+- * Note, however, that on later ARMs, this notion does not work due to
+- * speculative prefetches. We model our approach on the assumption that
+- * the CPU does do speculative prefetches, which means we clean caches
+- * before transfers and delay cache invalidation until transfer completion.
+- *
+- * Private support functions: these are not part of the API and are
+- * liable to change. Drivers must not use these.
+- */
+-static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
+- enum dma_data_direction dir)
+-{
+- extern void ___dma_single_cpu_to_dev(const void *, size_t,
+- enum dma_data_direction);
+-
+- if (!arch_is_coherent())
+- ___dma_single_cpu_to_dev(kaddr, size, dir);
+-}
+-
+-static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
+- enum dma_data_direction dir)
+-{
+- extern void ___dma_single_dev_to_cpu(const void *, size_t,
+- enum dma_data_direction);
+-
+- if (!arch_is_coherent())
+- ___dma_single_dev_to_cpu(kaddr, size, dir);
+-}
+-
+-static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+- size_t size, enum dma_data_direction dir)
+-{
+- extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
+- size_t, enum dma_data_direction);
+-
+- if (!arch_is_coherent())
+- ___dma_page_cpu_to_dev(page, off, size, dir);
+-}
+-
+-static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+- size_t size, enum dma_data_direction dir)
+-{
+- extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
+- size_t, enum dma_data_direction);
+-
+- if (!arch_is_coherent())
+- ___dma_page_dev_to_cpu(page, off, size, dir);
+-}
+-
+-extern int dma_supported(struct device *, u64);
+-extern int dma_set_mask(struct device *, u64);
+-/*
+ * DMA errors are defined by all-bits-set in the DMA address.
+ */
+ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+@@ -163,6 +107,8 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
+ {
+ }
+
++extern int dma_supported(struct device *dev, u64 mask);
++
+ /**
+ * dma_alloc_coherent - allocate consistent memory for DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+@@ -235,7 +181,6 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
+ extern void __init init_consistent_dma_size(unsigned long size);
+
+
+-#ifdef CONFIG_DMABOUNCE
+ /*
+ * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
+ * and utilize bounce buffers as needed to work around limited DMA windows.
+@@ -275,47 +220,7 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
+ */
+ extern void dmabounce_unregister_dev(struct device *);
+
+-/*
+- * The DMA API, implemented by dmabounce.c. See below for descriptions.
+- */
+-extern dma_addr_t __dma_map_page(struct device *, struct page *,
+- unsigned long, size_t, enum dma_data_direction);
+-extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
+- enum dma_data_direction);
+-
+-/*
+- * Private functions
+- */
+-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, size_t, enum dma_data_direction);
+-int dmabounce_sync_for_device(struct device *, dma_addr_t, size_t, enum dma_data_direction);
+-#else
+-static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
+- size_t size, enum dma_data_direction dir)
+-{
+- return 1;
+-}
+-
+-static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
+- size_t size, enum dma_data_direction dir)
+-{
+- return 1;
+-}
+-
+
+-static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+- unsigned long offset, size_t size, enum dma_data_direction dir)
+-{
+- __dma_page_cpu_to_dev(page, offset, size, dir);
+- return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+-}
+-
+-static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
+- size_t size, enum dma_data_direction dir)
+-{
+- __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+- handle & ~PAGE_MASK, size, dir);
+-}
+-#endif /* CONFIG_DMABOUNCE */
+
+ /*
+ * The scatter list versions of the above methods.
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 5b2b652..0265733 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -34,6 +34,75 @@
+
+ #include "mm.h"
+
++/*
++ * The DMA API is built upon the notion of "buffer ownership". A buffer
++ * is either exclusively owned by the CPU (and therefore may be accessed
++ * by it) or exclusively owned by the DMA device. These helper functions
++ * represent the transitions between these two ownership states.
++ *
++ * Note, however, that on later ARMs, this notion does not work due to
++ * speculative prefetches. We model our approach on the assumption that
++ * the CPU does do speculative prefetches, which means we clean caches
++ * before transfers and delay cache invalidation until transfer completion.
++ *
++ * Private support functions: these are not part of the API and are
++ * liable to change. Drivers must not use these.
++ */
++static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
++ enum dma_data_direction dir)
++{
++ extern void ___dma_single_cpu_to_dev(const void *, size_t,
++ enum dma_data_direction);
++
++ if (!arch_is_coherent())
++ ___dma_single_cpu_to_dev(kaddr, size, dir);
++}
++
++static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
++ enum dma_data_direction dir)
++{
++ extern void ___dma_single_dev_to_cpu(const void *, size_t,
++ enum dma_data_direction);
++
++ if (!arch_is_coherent())
++ ___dma_single_dev_to_cpu(kaddr, size, dir);
++}
++
++static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
++ size_t size, enum dma_data_direction dir)
++{
++ extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
++ size_t, enum dma_data_direction);
++
++ if (!arch_is_coherent())
++ ___dma_page_cpu_to_dev(page, off, size, dir);
++}
++
++static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
++ size_t size, enum dma_data_direction dir)
++{
++ extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
++ size_t, enum dma_data_direction);
++
++ if (!arch_is_coherent())
++ ___dma_page_dev_to_cpu(page, off, size, dir);
++}
++
++
++static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size, enum dma_data_direction dir)
++{
++ __dma_page_cpu_to_dev(page, offset, size, dir);
++ return pfn_to_dma(dev, page_to_pfn(page)) + offset;
++}
++
++static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
++ size_t size, enum dma_data_direction dir)
++{
++ __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
++ handle & ~PAGE_MASK, size, dir);
++}
++
+ /**
+ * arm_dma_map_page - map a portion of a page for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+@@ -81,9 +150,6 @@ static inline void arm_dma_sync_single_for_cpu(struct device *dev,
+ {
+ unsigned int offset = handle & (PAGE_SIZE - 1);
+ struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+- if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
+- return;
+-
+ __dma_page_dev_to_cpu(page, offset, size, dir);
+ }
+
+@@ -92,9 +158,6 @@ static inline void arm_dma_sync_single_for_device(struct device *dev,
+ {
+ unsigned int offset = handle & (PAGE_SIZE - 1);
+ struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+- if (!dmabounce_sync_for_device(dev, handle, size, dir))
+- return;
+-
+ __dma_page_cpu_to_dev(page, offset, size, dir);
+ }
+
+@@ -835,7 +898,6 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
+ }
+ /* FIXME: non-speculating: flush on bidirectional mappings? */
+ }
+-EXPORT_SYMBOL(___dma_page_cpu_to_dev);
+
+ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+ size_t size, enum dma_data_direction dir)
+@@ -855,7 +917,6 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+ if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
+ set_bit(PG_dcache_clean, &page->flags);
+ }
+-EXPORT_SYMBOL(___dma_page_dev_to_cpu);
+
+ /**
+ * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
+@@ -973,9 +1034,7 @@ static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+
+-#ifndef CONFIG_DMABOUNCE
+ *dev->dma_mask = dma_mask;
+-#endif
+
+ return 0;
+ }
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/arm-dma-mapping-print-warning-when-atomic-coherent-allocation-fails.patch b/patches.dma-mapping/arm-dma-mapping-print-warning-when-atomic-coherent-allocation-fails.patch
new file mode 100644
index 0000000000000..99ab205756de5
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-print-warning-when-atomic-coherent-allocation-fails.patch
@@ -0,0 +1,41 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:54:03 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:51:11 +0900
+Subject: [PATCH v2 54/58] ARM: DMA-Mapping: print warning when atomic coherent allocation fails
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-55-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+Print a loud warning when the system runs out of memory in the atomic DMA
+coherent pool, to let users notice the potential problem.
+
+Reported-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit fb71285f0c1633a85544784aae7577502274b77a)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+---
+ arch/arm/mm/dma-mapping.c | 4 ++++
+ 1 files changed, 4 insertions(+), 0 deletions(-)
+
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 3ff2585..e597c89 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -460,6 +460,10 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
+ bitmap_set(pool->bitmap, pageno, count);
+ ptr = pool->vaddr + PAGE_SIZE * pageno;
+ *ret_page = pool->page + pageno;
++ } else {
++ pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
++ "Please increase it with coherent_pool= kernel parameter!\n",
++ (unsigned)pool->size / 1024);
+ }
+ spin_unlock_irqrestore(&pool->lock, flags);
+
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/arm-dma-mapping-refactor-out-to-introduce-__in_atomic_pool.patch b/patches.dma-mapping/arm-dma-mapping-refactor-out-to-introduce-__in_atomic_pool.patch
new file mode 100644
index 0000000000000..5a9261936beab
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-refactor-out-to-introduce-__in_atomic_pool.patch
@@ -0,0 +1,70 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:54:05 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:51:12 +0900
+Subject: [PATCH v2 55/58] ARM: dma-mapping: Refactor out to introduce __in_atomic_pool
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-56-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Hiroshi Doyu <hdoyu@nvidia.com>
+
+Check whether the given range ("start", "size") is included in "atomic_pool".
+
+Signed-off-by: Hiroshi Doyu <hdoyu@nvidia.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit 21d0a75951ccf71f671eb24b61a8ad2b497be4b4)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+---
+ arch/arm/mm/dma-mapping.c | 26 ++++++++++++++++++++------
+ 1 files changed, 20 insertions(+), 6 deletions(-)
+
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index e597c89..94b7b78 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -470,20 +470,34 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
+ return ptr;
+ }
+
++static bool __in_atomic_pool(void *start, size_t size)
++{
++ struct dma_pool *pool = &atomic_pool;
++ void *end = start + size;
++ void *pool_start = pool->vaddr;
++ void *pool_end = pool->vaddr + pool->size;
++
++ if (start < pool_start || start > pool_end)
++ return false;
++
++ if (end <= pool_end)
++ return true;
++
++ WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
++ start, end - 1, pool_start, pool_end - 1);
++
++ return false;
++}
++
+ static int __free_from_pool(void *start, size_t size)
+ {
+ struct dma_pool *pool = &atomic_pool;
+ unsigned long pageno, count;
+ unsigned long flags;
+
+- if (start < pool->vaddr || start > pool->vaddr + pool->size)
++ if (!__in_atomic_pool(start, size))
+ return 0;
+
+- if (start + size > pool->vaddr + pool->size) {
+- WARN(1, "freeing wrong coherent size from pool\n");
+- return 0;
+- }
+-
+ pageno = (start - pool->vaddr) >> PAGE_SHIFT;
+ count = size >> PAGE_SHIFT;
+
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/arm-dma-mapping-remove-custom-consistent-dma-region.patch b/patches.dma-mapping/arm-dma-mapping-remove-custom-consistent-dma-region.patch
new file mode 100644
index 0000000000000..da0ba40bd96c0
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-remove-custom-consistent-dma-region.patch
@@ -0,0 +1,745 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:31 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:58 +0900
+Subject: [PATCH v2 41/58] ARM: dma-mapping: remove custom consistent dma region
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-42-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+This patch changes the dma-mapping subsystem to use generic vmalloc areas
+for all consistent dma allocations. This increases the total size limit
+of the consistent allocations and removes platform hacks and a lot of
+duplicated code.
+
+Atomic allocations are served from a special pool preallocated at boot,
+because vmalloc areas cannot be reliably created in atomic context.
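+
+For example (usage note, not part of the commit message), the pool can be
+resized from its 256 KiB default on the kernel command line:
+
+	coherent_pool=2M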
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Reviewed-by: Kyungmin Park <kyungmin.park@samsung.com>
+Reviewed-by: Minchan Kim <minchan@kernel.org>
+(cherry picked from commit e9da6e9905e639b0f842a244bc770b48ad0523e9)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+---
+ Documentation/kernel-parameters.txt | 2
+ arch/arm/include/asm/dma-mapping.h | 2
+ arch/arm/mm/dma-mapping.c | 492 ++++++++++++------------------------
+ arch/arm/mm/mm.h | 3
+ include/linux/vmalloc.h | 1
+ mm/vmalloc.c | 10
+ 6 files changed, 184 insertions(+), 326 deletions(-)
+
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -522,7 +522,7 @@ bytes respectively. Such letter suffixes
+
+ coherent_pool=nn[KMG] [ARM,KNL]
+ Sets the size of memory pool for coherent, atomic dma
+- allocations if Contiguous Memory Allocator (CMA) is used.
++ allocations, by default set to 256K.
+
+ code_bytes [X86] How many bytes of object code to print
+ in an oops report.
+--- a/arch/arm/include/asm/dma-mapping.h
++++ b/arch/arm/include/asm/dma-mapping.h
+@@ -226,7 +226,7 @@ static inline int dma_mmap_writecombine(
+ * DMA region above it's default value of 2MB. It must be called before the
+ * memory allocator is initialised, i.e. before any core_initcall.
+ */
+-extern void __init init_consistent_dma_size(unsigned long size);
++static inline void init_consistent_dma_size(unsigned long size) { }
+
+ /*
+ * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -22,6 +22,7 @@
+ #include <linux/memblock.h>
+ #include <linux/slab.h>
+ #include <linux/iommu.h>
++#include <linux/io.h>
+ #include <linux/vmalloc.h>
+
+ #include <asm/memory.h>
+@@ -217,115 +218,70 @@ static void __dma_free_buffer(struct pag
+ }
+
+ #ifdef CONFIG_MMU
++#ifdef CONFIG_HUGETLB_PAGE
++#error ARM Coherent DMA allocator does not (yet) support huge TLB
++#endif
+
+-#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - consistent_base) >> PAGE_SHIFT)
+-#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - consistent_base) >> PMD_SHIFT)
+-
+-/*
+- * These are the page tables (2MB each) covering uncached, DMA consistent allocations
+- */
+-static pte_t **consistent_pte;
+-
+-#define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M
++static void *__alloc_from_contiguous(struct device *dev, size_t size,
++ pgprot_t prot, struct page **ret_page);
+
+-static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
++static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
++ pgprot_t prot, struct page **ret_page,
++ const void *caller);
+
+-void __init init_consistent_dma_size(unsigned long size)
++static void *
++__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
++ const void *caller)
+ {
+- unsigned long base = CONSISTENT_END - ALIGN(size, SZ_2M);
++ struct vm_struct *area;
++ unsigned long addr;
+
+- BUG_ON(consistent_pte); /* Check we're called before DMA region init */
+- BUG_ON(base < VMALLOC_END);
++ /*
++ * DMA allocation can be mapped to user space, so lets
++ * set VM_USERMAP flags too.
++ */
++ area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
++ caller);
++ if (!area)
++ return NULL;
++ addr = (unsigned long)area->addr;
++ area->phys_addr = __pfn_to_phys(page_to_pfn(page));
+
+- /* Grow region to accommodate specified size */
+- if (base < consistent_base)
+- consistent_base = base;
++ if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
++ vunmap((void *)addr);
++ return NULL;
++ }
++ return (void *)addr;
+ }
+
+-#include "vmregion.h"
+-
+-static struct arm_vmregion_head consistent_head = {
+- .vm_lock = __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
+- .vm_list = LIST_HEAD_INIT(consistent_head.vm_list),
+- .vm_end = CONSISTENT_END,
+-};
+-
+-#ifdef CONFIG_HUGETLB_PAGE
+-#error ARM Coherent DMA allocator does not (yet) support huge TLB
+-#endif
+-
+-/*
+- * Initialise the consistent memory allocation.
+- */
+-static int __init consistent_init(void)
++static void __dma_free_remap(void *cpu_addr, size_t size)
+ {
+- int ret = 0;
+- pgd_t *pgd;
+- pud_t *pud;
+- pmd_t *pmd;
+- pte_t *pte;
+- int i = 0;
+- unsigned long base = consistent_base;
+- unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
+-
+- if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+- return 0;
+-
+- consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
+- if (!consistent_pte) {
+- pr_err("%s: no memory\n", __func__);
+- return -ENOMEM;
++ unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
++ struct vm_struct *area = find_vm_area(cpu_addr);
++ if (!area || (area->flags & flags) != flags) {
++ WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
++ return;
+ }
+-
+- pr_debug("DMA memory: 0x%08lx - 0x%08lx:\n", base, CONSISTENT_END);
+- consistent_head.vm_start = base;
+-
+- do {
+- pgd = pgd_offset(&init_mm, base);
+-
+- pud = pud_alloc(&init_mm, pgd, base);
+- if (!pud) {
+- pr_err("%s: no pud tables\n", __func__);
+- ret = -ENOMEM;
+- break;
+- }
+-
+- pmd = pmd_alloc(&init_mm, pud, base);
+- if (!pmd) {
+- pr_err("%s: no pmd tables\n", __func__);
+- ret = -ENOMEM;
+- break;
+- }
+- WARN_ON(!pmd_none(*pmd));
+-
+- pte = pte_alloc_kernel(pmd, base);
+- if (!pte) {
+- pr_err("%s: no pte tables\n", __func__);
+- ret = -ENOMEM;
+- break;
+- }
+-
+- consistent_pte[i++] = pte;
+- base += PMD_SIZE;
+- } while (base < CONSISTENT_END);
+-
+- return ret;
++ unmap_kernel_range((unsigned long)cpu_addr, size);
++ vunmap(cpu_addr);
+ }
+-core_initcall(consistent_init);
+
+-static void *__alloc_from_contiguous(struct device *dev, size_t size,
+- pgprot_t prot, struct page **ret_page);
+-
+-static struct arm_vmregion_head coherent_head = {
+- .vm_lock = __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock),
+- .vm_list = LIST_HEAD_INIT(coherent_head.vm_list),
++struct dma_pool {
++ size_t size;
++ spinlock_t lock;
++ unsigned long *bitmap;
++ unsigned long nr_pages;
++ void *vaddr;
++ struct page *page;
+ };
+
+-static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
++static struct dma_pool atomic_pool = {
++ .size = SZ_256K,
++};
+
+ static int __init early_coherent_pool(char *p)
+ {
+- coherent_pool_size = memparse(p, &p);
++ atomic_pool.size = memparse(p, &p);
+ return 0;
+ }
+ early_param("coherent_pool", early_coherent_pool);
+@@ -333,32 +289,45 @@ early_param("coherent_pool", early_coher
+ /*
+ * Initialise the coherent pool for atomic allocations.
+ */
+-static int __init coherent_init(void)
++static int __init atomic_pool_init(void)
+ {
++ struct dma_pool *pool = &atomic_pool;
+ pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
+- size_t size = coherent_pool_size;
++ unsigned long nr_pages = pool->size >> PAGE_SHIFT;
++ unsigned long *bitmap;
+ struct page *page;
+ void *ptr;
++ int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
+
+- if (!IS_ENABLED(CONFIG_CMA))
+- return 0;
++ bitmap = kzalloc(bitmap_size, GFP_KERNEL);
++ if (!bitmap)
++ goto no_bitmap;
+
+- ptr = __alloc_from_contiguous(NULL, size, prot, &page);
++ if (IS_ENABLED(CONFIG_CMA))
++ ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
++ else
++ ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
++ &page, NULL);
+ if (ptr) {
+- coherent_head.vm_start = (unsigned long) ptr;
+- coherent_head.vm_end = (unsigned long) ptr + size;
+- printk(KERN_INFO "DMA: preallocated %u KiB pool for atomic coherent allocations\n",
+- (unsigned)size / 1024);
++ spin_lock_init(&pool->lock);
++ pool->vaddr = ptr;
++ pool->page = page;
++ pool->bitmap = bitmap;
++ pool->nr_pages = nr_pages;
++ pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
++ (unsigned)pool->size / 1024);
+ return 0;
+ }
+- printk(KERN_ERR "DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
+- (unsigned)size / 1024);
++ kfree(bitmap);
++no_bitmap:
++ pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
++ (unsigned)pool->size / 1024);
+ return -ENOMEM;
+ }
+ /*
+ * CMA is activated by core_initcall, so we must be called after it.
+ */
+-postcore_initcall(coherent_init);
++postcore_initcall(atomic_pool_init);
+
+ struct dma_contig_early_reserve {
+ phys_addr_t base;
+@@ -406,112 +375,6 @@ void __init dma_contiguous_remap(void)
+ }
+ }
+
+-static void *
+-__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
+- const void *caller)
+-{
+- struct arm_vmregion *c;
+- size_t align;
+- int bit;
+-
+- if (!consistent_pte) {
+- pr_err("%s: not initialised\n", __func__);
+- dump_stack();
+- return NULL;
+- }
+-
+- /*
+- * Align the virtual region allocation - maximum alignment is
+- * a section size, minimum is a page size. This helps reduce
+- * fragmentation of the DMA space, and also prevents allocations
+- * smaller than a section from crossing a section boundary.
+- */
+- bit = fls(size - 1);
+- if (bit > SECTION_SHIFT)
+- bit = SECTION_SHIFT;
+- align = 1 << bit;
+-
+- /*
+- * Allocate a virtual address in the consistent mapping region.
+- */
+- c = arm_vmregion_alloc(&consistent_head, align, size,
+- gfp & ~(__GFP_DMA | __GFP_HIGHMEM), caller);
+- if (c) {
+- pte_t *pte;
+- int idx = CONSISTENT_PTE_INDEX(c->vm_start);
+- u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+-
+- pte = consistent_pte[idx] + off;
+- c->priv = page;
+-
+- do {
+- BUG_ON(!pte_none(*pte));
+-
+- set_pte_ext(pte, mk_pte(page, prot), 0);
+- page++;
+- pte++;
+- off++;
+- if (off >= PTRS_PER_PTE) {
+- off = 0;
+- pte = consistent_pte[++idx];
+- }
+- } while (size -= PAGE_SIZE);
+-
+- dsb();
+-
+- return (void *)c->vm_start;
+- }
+- return NULL;
+-}
+-
+-static void __dma_free_remap(void *cpu_addr, size_t size)
+-{
+- struct arm_vmregion *c;
+- unsigned long addr;
+- pte_t *ptep;
+- int idx;
+- u32 off;
+-
+- c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
+- if (!c) {
+- pr_err("%s: trying to free invalid coherent area: %p\n",
+- __func__, cpu_addr);
+- dump_stack();
+- return;
+- }
+-
+- if ((c->vm_end - c->vm_start) != size) {
+- pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
+- __func__, c->vm_end - c->vm_start, size);
+- dump_stack();
+- size = c->vm_end - c->vm_start;
+- }
+-
+- idx = CONSISTENT_PTE_INDEX(c->vm_start);
+- off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+- ptep = consistent_pte[idx] + off;
+- addr = c->vm_start;
+- do {
+- pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
+-
+- ptep++;
+- addr += PAGE_SIZE;
+- off++;
+- if (off >= PTRS_PER_PTE) {
+- off = 0;
+- ptep = consistent_pte[++idx];
+- }
+-
+- if (pte_none(pte) || !pte_present(pte))
+- pr_crit("%s: bad page in kernel page table\n",
+- __func__);
+- } while (size -= PAGE_SIZE);
+-
+- flush_tlb_kernel_range(c->vm_start, c->vm_end);
+-
+- arm_vmregion_free(&consistent_head, c);
+-}
+-
+ static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
+ void *data)
+ {
+@@ -552,16 +415,17 @@ static void *__alloc_remap_buffer(struct
+ return ptr;
+ }
+
+-static void *__alloc_from_pool(struct device *dev, size_t size,
+- struct page **ret_page, const void *caller)
++static void *__alloc_from_pool(size_t size, struct page **ret_page)
+ {
+- struct arm_vmregion *c;
++ struct dma_pool *pool = &atomic_pool;
++ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
++ unsigned int pageno;
++ unsigned long flags;
++ void *ptr = NULL;
+ size_t align;
+
+- if (!coherent_head.vm_start) {
+- printk(KERN_ERR "%s: coherent pool not initialised!\n",
+- __func__);
+- dump_stack();
++ if (!pool->vaddr) {
++ WARN(1, "coherent pool not initialised!\n");
+ return NULL;
+ }
+
+@@ -571,35 +435,41 @@ static void *__alloc_from_pool(struct de
+ * size. This helps reduce fragmentation of the DMA space.
+ */
+ align = PAGE_SIZE << get_order(size);
+- c = arm_vmregion_alloc(&coherent_head, align, size, 0, caller);
+- if (c) {
+- void *ptr = (void *)c->vm_start;
+- struct page *page = virt_to_page(ptr);
+- *ret_page = page;
+- return ptr;
++
++ spin_lock_irqsave(&pool->lock, flags);
++ pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
++ 0, count, (1 << align) - 1);
++ if (pageno < pool->nr_pages) {
++ bitmap_set(pool->bitmap, pageno, count);
++ ptr = pool->vaddr + PAGE_SIZE * pageno;
++ *ret_page = pool->page + pageno;
+ }
+- return NULL;
++ spin_unlock_irqrestore(&pool->lock, flags);
++
++ return ptr;
+ }
+
+-static int __free_from_pool(void *cpu_addr, size_t size)
++static int __free_from_pool(void *start, size_t size)
+ {
+- unsigned long start = (unsigned long)cpu_addr;
+- unsigned long end = start + size;
+- struct arm_vmregion *c;
++ struct dma_pool *pool = &atomic_pool;
++ unsigned long pageno, count;
++ unsigned long flags;
+
+- if (start < coherent_head.vm_start || end > coherent_head.vm_end)
++ if (start < pool->vaddr || start > pool->vaddr + pool->size)
+ return 0;
+
+- c = arm_vmregion_find_remove(&coherent_head, (unsigned long)start);
+-
+- if ((c->vm_end - c->vm_start) != size) {
+- printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+- __func__, c->vm_end - c->vm_start, size);
+- dump_stack();
+- size = c->vm_end - c->vm_start;
++ if (start + size > pool->vaddr + pool->size) {
++ WARN(1, "freeing wrong coherent size from pool\n");
++ return 0;
+ }
+
+- arm_vmregion_free(&coherent_head, c);
++ pageno = (start - pool->vaddr) >> PAGE_SHIFT;
++ count = size >> PAGE_SHIFT;
++
++ spin_lock_irqsave(&pool->lock, flags);
++ bitmap_clear(pool->bitmap, pageno, count);
++ spin_unlock_irqrestore(&pool->lock, flags);
++
+ return 1;
+ }
+
+@@ -644,7 +514,7 @@ static inline pgprot_t __get_dma_pgprot(
+
+ #define __get_dma_pgprot(attrs, prot) __pgprot(0)
+ #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
+-#define __alloc_from_pool(dev, size, ret_page, c) NULL
++#define __alloc_from_pool(size, ret_page) NULL
+ #define __alloc_from_contiguous(dev, size, prot, ret) NULL
+ #define __free_from_pool(cpu_addr, size) 0
+ #define __free_from_contiguous(dev, page, size) do { } while (0)
+@@ -702,10 +572,10 @@ static void *__dma_alloc(struct device *
+
+ if (arch_is_coherent() || nommu())
+ addr = __alloc_simple_buffer(dev, size, gfp, &page);
++ else if (gfp & GFP_ATOMIC)
++ addr = __alloc_from_pool(size, &page);
+ else if (!IS_ENABLED(CONFIG_CMA))
+ addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
+- else if (gfp & GFP_ATOMIC)
+- addr = __alloc_from_pool(dev, size, &page, caller);
+ else
+ addr = __alloc_from_contiguous(dev, size, prot, &page);
+
+@@ -998,9 +868,6 @@ static int arm_dma_set_mask(struct devic
+
+ static int __init dma_debug_do_init(void)
+ {
+-#ifdef CONFIG_MMU
+- arm_vmregion_create_proc("dma-mappings", &consistent_head);
+-#endif
+ dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+ return 0;
+ }
+@@ -1117,61 +984,32 @@ static int __iommu_free_buffer(struct de
+ * Create a CPU mapping for a specified pages
+ */
+ static void *
+-__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
++__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
++ const void *caller)
+ {
+- struct arm_vmregion *c;
+- size_t align;
+- size_t count = size >> PAGE_SHIFT;
+- int bit;
+-
+- if (!consistent_pte[0]) {
+- pr_err("%s: not initialised\n", __func__);
+- dump_stack();
++ unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
++ struct vm_struct *area;
++ unsigned long p;
++
++ area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
++ caller);
++ if (!area)
+ return NULL;
+- }
+-
+- /*
+- * Align the virtual region allocation - maximum alignment is
+- * a section size, minimum is a page size. This helps reduce
+- * fragmentation of the DMA space, and also prevents allocations
+- * smaller than a section from crossing a section boundary.
+- */
+- bit = fls(size - 1);
+- if (bit > SECTION_SHIFT)
+- bit = SECTION_SHIFT;
+- align = 1 << bit;
+-
+- /*
+- * Allocate a virtual address in the consistent mapping region.
+- */
+- c = arm_vmregion_alloc(&consistent_head, align, size,
+- gfp & ~(__GFP_DMA | __GFP_HIGHMEM), NULL);
+- if (c) {
+- pte_t *pte;
+- int idx = CONSISTENT_PTE_INDEX(c->vm_start);
+- int i = 0;
+- u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+-
+- pte = consistent_pte[idx] + off;
+- c->priv = pages;
+-
+- do {
+- BUG_ON(!pte_none(*pte));
+-
+- set_pte_ext(pte, mk_pte(pages[i], prot), 0);
+- pte++;
+- off++;
+- i++;
+- if (off >= PTRS_PER_PTE) {
+- off = 0;
+- pte = consistent_pte[++idx];
+- }
+- } while (i < count);
+-
+- dsb();
+
+- return (void *)c->vm_start;
+- }
++ area->pages = pages;
++ area->nr_pages = nr_pages;
++ p = (unsigned long)area->addr;
++
++ for (i = 0; i < nr_pages; i++) {
++ phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
++ if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
++ goto err;
++ p += PAGE_SIZE;
++ }
++ return area->addr;
++err:
++ unmap_kernel_range((unsigned long)area->addr, size);
++ vunmap(area->addr);
+ return NULL;
+ }
+
+@@ -1230,6 +1068,16 @@ static int __iommu_remove_mapping(struct
+ return 0;
+ }
+
++static struct page **__iommu_get_pages(void *cpu_addr)
++{
++ struct vm_struct *area;
++
++ area = find_vm_area(cpu_addr);
++ if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
++ return area->pages;
++ return NULL;
++}
++
+ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
+ {
+@@ -1248,7 +1096,8 @@ static void *arm_iommu_alloc_attrs(struc
+ if (*handle == DMA_ERROR_CODE)
+ goto err_buffer;
+
+- addr = __iommu_alloc_remap(pages, size, gfp, prot);
++ addr = __iommu_alloc_remap(pages, size, gfp, prot,
++ __builtin_return_address(0));
+ if (!addr)
+ goto err_mapping;
+
+@@ -1265,31 +1114,25 @@ static int arm_iommu_mmap_attrs(struct d
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+ {
+- struct arm_vmregion *c;
++ unsigned long uaddr = vma->vm_start;
++ unsigned long usize = vma->vm_end - vma->vm_start;
++ struct page **pages = __iommu_get_pages(cpu_addr);
+
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
+- c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+
+- if (c) {
+- struct page **pages = c->priv;
++ if (!pages)
++ return -ENXIO;
+
+- unsigned long uaddr = vma->vm_start;
+- unsigned long usize = vma->vm_end - vma->vm_start;
+- int i = 0;
+-
+- do {
+- int ret;
+-
+- ret = vm_insert_page(vma, uaddr, pages[i++]);
+- if (ret) {
+- pr_err("Remapping memory, error: %d\n", ret);
+- return ret;
+- }
++ do {
++ int ret = vm_insert_page(vma, uaddr, *pages++);
++ if (ret) {
++ pr_err("Remapping memory failed: %d\n", ret);
++ return ret;
++ }
++ uaddr += PAGE_SIZE;
++ usize -= PAGE_SIZE;
++ } while (usize > 0);
+
+- uaddr += PAGE_SIZE;
+- usize -= PAGE_SIZE;
+- } while (usize > 0);
+- }
+ return 0;
+ }
+
+@@ -1300,16 +1143,19 @@ static int arm_iommu_mmap_attrs(struct d
+ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, struct dma_attrs *attrs)
+ {
+- struct arm_vmregion *c;
++ struct page **pages = __iommu_get_pages(cpu_addr);
+ size = PAGE_ALIGN(size);
+
+- c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+- if (c) {
+- struct page **pages = c->priv;
+- __dma_free_remap(cpu_addr, size);
+- __iommu_remove_mapping(dev, handle, size);
+- __iommu_free_buffer(dev, pages, size);
++ if (!pages) {
++ WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
++ return;
+ }
++
++ unmap_kernel_range((unsigned long)cpu_addr, size);
++ vunmap(cpu_addr);
++
++ __iommu_remove_mapping(dev, handle, size);
++ __iommu_free_buffer(dev, pages, size);
+ }
+
+ /*
+--- a/arch/arm/mm/mm.h
++++ b/arch/arm/mm/mm.h
+@@ -62,6 +62,9 @@ extern void __flush_dcache_page(struct a
+ #define VM_ARM_MTYPE(mt) ((mt) << 20)
+ #define VM_ARM_MTYPE_MASK (0x1f << 20)
+
++/* consistent regions used by dma_alloc_attrs() */
++#define VM_ARM_DMA_CONSISTENT 0x20000000
++
+ #endif
+
+ #ifdef CONFIG_ZONE_DMA
+--- a/include/linux/vmalloc.h
++++ b/include/linux/vmalloc.h
+@@ -93,6 +93,7 @@ extern struct vm_struct *__get_vm_area_c
+ unsigned long start, unsigned long end,
+ const void *caller);
+ extern struct vm_struct *remove_vm_area(const void *addr);
++extern struct vm_struct *find_vm_area(const void *addr);
+
+ extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
+ struct page ***pages);
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -1403,7 +1403,15 @@ struct vm_struct *get_vm_area_caller(uns
+ -1, GFP_KERNEL, caller);
+ }
+
+-static struct vm_struct *find_vm_area(const void *addr)
++/**
++ * find_vm_area - find a continuous kernel virtual area
++ * @addr: base address
++ *
++ * Search for the kernel VM area starting at @addr, and return it.
++ * It is up to the caller to do all required locking to keep the returned
++ * pointer valid.
++ */
++struct vm_struct *find_vm_area(const void *addr)
+ {
+ struct vmap_area *va;
+
diff --git a/patches.dma-mapping/arm-dma-mapping-remove-offset-parameter-to-prepare-for-generic-dma_ops.patch b/patches.dma-mapping/arm-dma-mapping-remove-offset-parameter-to-prepare-for-generic-dma_ops.patch
new file mode 100644
index 0000000000000..c3cab3b4e8268
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-remove-offset-parameter-to-prepare-for-generic-dma_ops.patch
@@ -0,0 +1,244 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:42 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:39 +0900
+Subject: [PATCH v2 22/58] ARM: dma-mapping: remove offset parameter to prepare for generic dma_ops
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-23-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+This patch removes the need for the offset parameter in the dma bounce
+functions. This is required to let the dma-mapping framework on the ARM
+architecture use the common, generic dma_map_ops based dma-mapping
+helpers.
+
+Background and more detailed explanation:
+
+The dma_*_range_* functions have been available since the early days of
+the dma mapping api. They are the correct way of doing partial syncs on a
+buffer (usually used by network device drivers). This patch changes
+only the internal implementation of the dma bounce functions to let
+them tunnel through the dma_map_ops structure. The driver api stays
+unchanged, so drivers are still obliged to call the dma_*_range_*
+functions to keep the code clean and easy to understand.
+
+The only drawback of this patch is reduced detection of dma api
+abuse. Let us consider the following code:
+
+dma_addr = dma_map_single(dev, ptr, 64, DMA_TO_DEVICE);
+dma_sync_single_range_for_cpu(dev, dma_addr+16, 0, 32, DMA_TO_DEVICE);
+
+Without the patch such code fails, because dma bounce code is unable
+to find the bounce buffer for the given dma_address. After the patch
+the above sync call will be equivalent to:
+
+dma_sync_single_range_for_cpu(dev, dma_addr, 16, 32, DMA_TO_DEVICE);
+
+which succeeds.
+
+I don't consider this a real problem, because DMA API abuse should be
+caught by the debug_dma_* function family. This patch lets us simplify
+the internal low-level implementation without changing the driver-visible
+API.
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
+Tested-By: Subash Patel <subash.ramaswamy@linaro.org>
+(cherry picked from commit a227fb92a0f5f0dd8282719386e9b3a29f0d16b2)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ arch/arm/common/dmabounce.c | 13 +++++--
+ arch/arm/include/asm/dma-mapping.h | 67 +++++++++++++++++------------------
+ arch/arm/mm/dma-mapping.c | 4 +-
+ 3 files changed, 45 insertions(+), 39 deletions(-)
+
+diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
+index 210ad1b..32e9cc6 100644
+--- a/arch/arm/common/dmabounce.c
++++ b/arch/arm/common/dmabounce.c
+@@ -173,7 +173,8 @@ find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_
+ read_lock_irqsave(&device_info->lock, flags);
+
+ list_for_each_entry(b, &device_info->safe_buffers, node)
+- if (b->safe_dma_addr == safe_dma_addr) {
++ if (b->safe_dma_addr <= safe_dma_addr &&
++ b->safe_dma_addr + b->size > safe_dma_addr) {
+ rb = b;
+ break;
+ }
+@@ -362,9 +363,10 @@ void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
+ EXPORT_SYMBOL(__dma_unmap_page);
+
+ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+- unsigned long off, size_t sz, enum dma_data_direction dir)
++ size_t sz, enum dma_data_direction dir)
+ {
+ struct safe_buffer *buf;
++ unsigned long off;
+
+ dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
+ __func__, addr, off, sz, dir);
+@@ -373,6 +375,8 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+ if (!buf)
+ return 1;
+
++ off = addr - buf->safe_dma_addr;
++
+ BUG_ON(buf->direction != dir);
+
+ dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+@@ -391,9 +395,10 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+ EXPORT_SYMBOL(dmabounce_sync_for_cpu);
+
+ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+- unsigned long off, size_t sz, enum dma_data_direction dir)
++ size_t sz, enum dma_data_direction dir)
+ {
+ struct safe_buffer *buf;
++ unsigned long off;
+
+ dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
+ __func__, addr, off, sz, dir);
+@@ -402,6 +407,8 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+ if (!buf)
+ return 1;
+
++ off = addr - buf->safe_dma_addr;
++
+ BUG_ON(buf->direction != dir);
+
+ dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
+index 6a838da..eeddbe2 100644
+--- a/arch/arm/include/asm/dma-mapping.h
++++ b/arch/arm/include/asm/dma-mapping.h
+@@ -266,19 +266,17 @@ extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
+ /*
+ * Private functions
+ */
+-int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
+- size_t, enum dma_data_direction);
+-int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
+- size_t, enum dma_data_direction);
++int dmabounce_sync_for_cpu(struct device *, dma_addr_t, size_t, enum dma_data_direction);
++int dmabounce_sync_for_device(struct device *, dma_addr_t, size_t, enum dma_data_direction);
+ #else
+ static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
+- unsigned long offset, size_t size, enum dma_data_direction dir)
++ size_t size, enum dma_data_direction dir)
+ {
+ return 1;
+ }
+
+ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
+- unsigned long offset, size_t size, enum dma_data_direction dir)
++ size_t size, enum dma_data_direction dir)
+ {
+ return 1;
+ }
+@@ -401,6 +399,33 @@ static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
+ __dma_unmap_page(dev, handle, size, dir);
+ }
+
++
++static inline void dma_sync_single_for_cpu(struct device *dev,
++ dma_addr_t handle, size_t size, enum dma_data_direction dir)
++{
++ BUG_ON(!valid_dma_direction(dir));
++
++ debug_dma_sync_single_for_cpu(dev, handle, size, dir);
++
++ if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
++ return;
++
++ __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
++}
++
++static inline void dma_sync_single_for_device(struct device *dev,
++ dma_addr_t handle, size_t size, enum dma_data_direction dir)
++{
++ BUG_ON(!valid_dma_direction(dir));
++
++ debug_dma_sync_single_for_device(dev, handle, size, dir);
++
++ if (!dmabounce_sync_for_device(dev, handle, size, dir))
++ return;
++
++ __dma_single_cpu_to_dev(dma_to_virt(dev, handle), size, dir);
++}
++
+ /**
+ * dma_sync_single_range_for_cpu
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+@@ -423,40 +448,14 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t handle, unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+ {
+- BUG_ON(!valid_dma_direction(dir));
+-
+- debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
+-
+- if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
+- return;
+-
+- __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
++ dma_sync_single_for_cpu(dev, handle + offset, size, dir);
+ }
+
+ static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t handle, unsigned long offset, size_t size,
+ enum dma_data_direction dir)
+ {
+- BUG_ON(!valid_dma_direction(dir));
+-
+- debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
+-
+- if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
+- return;
+-
+- __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
+-}
+-
+-static inline void dma_sync_single_for_cpu(struct device *dev,
+- dma_addr_t handle, size_t size, enum dma_data_direction dir)
+-{
+- dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
+-}
+-
+-static inline void dma_sync_single_for_device(struct device *dev,
+- dma_addr_t handle, size_t size, enum dma_data_direction dir)
+-{
+- dma_sync_single_range_for_device(dev, handle, 0, size, dir);
++ dma_sync_single_for_device(dev, handle + offset, size, dir);
+ }
+
+ /*
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index fab24ec..c17c353 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -896,7 +896,7 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int i;
+
+ for_each_sg(sg, s, nents, i) {
+- if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
++ if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s),
+ sg_dma_len(s), dir))
+ continue;
+
+@@ -922,7 +922,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int i;
+
+ for_each_sg(sg, s, nents, i) {
+- if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
++ if (!dmabounce_sync_for_device(dev, sg_dma_address(s),
+ sg_dma_len(s), dir))
+ continue;
+
+--
+1.7.5.4
+
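The description in the patch above centres on partial syncs being resolved purely from the DMA address. As a minimal, hypothetical sketch (the device, buffer and sizes are illustrative, not taken from the patch), a driver-side sequence using the unchanged dma_*_range_* API looks like this:

#include <linux/dma-mapping.h>

/* Sketch only: map a whole buffer for DMA from the device, then hand
 * just the first 64 bytes back to the CPU for inspection.  With the
 * patch above, dmabounce locates the safe buffer from handle alone. */
static int example_partial_sync(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... start the transfer and wait for it to complete ... */

	/* Sync only the sub-range the CPU is about to read. */
	dma_sync_single_range_for_cpu(dev, handle, 0, 64, DMA_FROM_DEVICE);
	/* ... look at buf[0..63] ... */
	dma_sync_single_range_for_device(dev, handle, 0, 64, DMA_FROM_DEVICE);

	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}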
diff --git a/patches.dma-mapping/arm-dma-mapping-remove-redundant-code-and-do-the-cleanup.patch b/patches.dma-mapping/arm-dma-mapping-remove-redundant-code-and-do-the-cleanup.patch
new file mode 100644
index 0000000000000..ba1c71d50456b
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-remove-redundant-code-and-do-the-cleanup.patch
@@ -0,0 +1,179 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:52 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:43 +0900
+Subject: [PATCH v2 26/58] ARM: dma-mapping: remove redundant code and do the cleanup
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-27-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+This patch just performs a global cleanup of the DMA mapping implementation
+for the ARM architecture. Some of the tiny helper functions have been moved
+into the caller code, and some have been merged together.
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
+Acked-by: Arnd Bergmann <arnd@arndb.de>
+Tested-By: Subash Patel <subash.ramaswamy@linaro.org>
+(cherry picked from commit 51fde3499b531d4cf278f4d2eaa6c45b2865b16b)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ arch/arm/mm/dma-mapping.c | 88 ++++++++++++--------------------------------
+ 1 files changed, 24 insertions(+), 64 deletions(-)
+
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 0265733..f7fbbda 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -45,64 +45,12 @@
+ * the CPU does do speculative prefetches, which means we clean caches
+ * before transfers and delay cache invalidation until transfer completion.
+ *
+- * Private support functions: these are not part of the API and are
+- * liable to change. Drivers must not use these.
+ */
+-static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
+- enum dma_data_direction dir)
+-{
+- extern void ___dma_single_cpu_to_dev(const void *, size_t,
+- enum dma_data_direction);
+-
+- if (!arch_is_coherent())
+- ___dma_single_cpu_to_dev(kaddr, size, dir);
+-}
+-
+-static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
+- enum dma_data_direction dir)
+-{
+- extern void ___dma_single_dev_to_cpu(const void *, size_t,
+- enum dma_data_direction);
+-
+- if (!arch_is_coherent())
+- ___dma_single_dev_to_cpu(kaddr, size, dir);
+-}
+-
+-static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+- size_t size, enum dma_data_direction dir)
+-{
+- extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
++static void __dma_page_cpu_to_dev(struct page *, unsigned long,
+ size_t, enum dma_data_direction);
+-
+- if (!arch_is_coherent())
+- ___dma_page_cpu_to_dev(page, off, size, dir);
+-}
+-
+-static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+- size_t size, enum dma_data_direction dir)
+-{
+- extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
++static void __dma_page_dev_to_cpu(struct page *, unsigned long,
+ size_t, enum dma_data_direction);
+
+- if (!arch_is_coherent())
+- ___dma_page_dev_to_cpu(page, off, size, dir);
+-}
+-
+-
+-static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
+- unsigned long offset, size_t size, enum dma_data_direction dir)
+-{
+- __dma_page_cpu_to_dev(page, offset, size, dir);
+- return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+-}
+-
+-static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
+- size_t size, enum dma_data_direction dir)
+-{
+- __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+- handle & ~PAGE_MASK, size, dir);
+-}
+-
+ /**
+ * arm_dma_map_page - map a portion of a page for streaming DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+@@ -117,11 +65,13 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
+ * The device owns this memory once this call has completed. The CPU
+ * can regain ownership by calling dma_unmap_page().
+ */
+-static inline dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
++static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+ {
+- return __dma_map_page(dev, page, offset, size, dir);
++ if (!arch_is_coherent())
++ __dma_page_cpu_to_dev(page, offset, size, dir);
++ return pfn_to_dma(dev, page_to_pfn(page)) + offset;
+ }
+
+ /**
+@@ -138,27 +88,31 @@ static inline dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
+ * After this call, reads by the CPU to the buffer are guaranteed to see
+ * whatever the device wrote there.
+ */
+-static inline void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
++static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+ {
+- __dma_unmap_page(dev, handle, size, dir);
++ if (!arch_is_coherent())
++ __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
++ handle & ~PAGE_MASK, size, dir);
+ }
+
+-static inline void arm_dma_sync_single_for_cpu(struct device *dev,
++static void arm_dma_sync_single_for_cpu(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+ {
+ unsigned int offset = handle & (PAGE_SIZE - 1);
+ struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+- __dma_page_dev_to_cpu(page, offset, size, dir);
++ if (!arch_is_coherent())
++ __dma_page_dev_to_cpu(page, offset, size, dir);
+ }
+
+-static inline void arm_dma_sync_single_for_device(struct device *dev,
++static void arm_dma_sync_single_for_device(struct device *dev,
+ dma_addr_t handle, size_t size, enum dma_data_direction dir)
+ {
+ unsigned int offset = handle & (PAGE_SIZE - 1);
+ struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
+- __dma_page_cpu_to_dev(page, offset, size, dir);
++ if (!arch_is_coherent())
++ __dma_page_cpu_to_dev(page, offset, size, dir);
+ }
+
+ static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
+@@ -883,7 +837,13 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
+ } while (left);
+ }
+
+-void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
++/*
++ * Make an area consistent for devices.
++ * Note: Drivers should NOT use this function directly, as it will break
++ * platforms with CONFIG_DMABOUNCE.
++ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
++ */
++static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
+ size_t size, enum dma_data_direction dir)
+ {
+ unsigned long paddr;
+@@ -899,7 +859,7 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
+ /* FIXME: non-speculating: flush on bidirectional mappings? */
+ }
+
+-void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
++static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+ size_t size, enum dma_data_direction dir)
+ {
+ unsigned long paddr = page_to_phys(page) + off;
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/arm-dma-mapping-remove-unconditional-dependency-on-cma.patch b/patches.dma-mapping/arm-dma-mapping-remove-unconditional-dependency-on-cma.patch
new file mode 100644
index 0000000000000..7e474482d7426
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-remove-unconditional-dependency-on-cma.patch
@@ -0,0 +1,88 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:59 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:46 +0900
+Subject: [PATCH v2 29/58] ARM: dma-mapping: remove unconditional dependency on CMA
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-30-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+CMA has been enabled unconditionally on all ARMv6+ systems to solve the
+long-standing issue of double kernel mappings for all dma coherent
+buffers. This, however, created a dependency on CONFIG_EXPERIMENTAL for
+the whole ARM architecture, which should really be avoided. This patch
+removes this dependency and lets one use the old, well-tested dma-mapping
+implementation on ARMv6+ systems as well, without the need to enable
+EXPERIMENTAL options.
+
+Reported-by: Russell King <linux@arm.linux.org.uk>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit f1ae98da8525c6b8b1c301c3a2b0bd2b6515cca2)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ arch/arm/Kconfig | 1 -
+ arch/arm/mm/dma-mapping.c | 10 ++++------
+ 2 files changed, 4 insertions(+), 7 deletions(-)
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index c7a542c..5c0c50e 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -6,7 +6,6 @@ config ARM
+ select HAVE_IDE if PCI || ISA || PCMCIA
+ select HAVE_DMA_ATTRS
+ select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
+- select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
+ select HAVE_MEMBLOCK
+ select RTC_LIB
+ select SYS_SUPPORTS_APM_EMULATION
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index ea6b431..106c4c0 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -268,10 +268,8 @@ static int __init consistent_init(void)
+ unsigned long base = consistent_base;
+ unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
+
+-#ifndef CONFIG_ARM_DMA_USE_IOMMU
+- if (cpu_architecture() >= CPU_ARCH_ARMv6)
++ if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+ return 0;
+-#endif
+
+ consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
+ if (!consistent_pte) {
+@@ -342,7 +340,7 @@ static int __init coherent_init(void)
+ struct page *page;
+ void *ptr;
+
+- if (cpu_architecture() < CPU_ARCH_ARMv6)
++ if (!IS_ENABLED(CONFIG_CMA))
+ return 0;
+
+ ptr = __alloc_from_contiguous(NULL, size, prot, &page);
+@@ -704,7 +702,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+
+ if (arch_is_coherent() || nommu())
+ addr = __alloc_simple_buffer(dev, size, gfp, &page);
+- else if (cpu_architecture() < CPU_ARCH_ARMv6)
++ else if (!IS_ENABLED(CONFIG_CMA))
+ addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
+ else if (gfp & GFP_ATOMIC)
+ addr = __alloc_from_pool(dev, size, &page, caller);
+@@ -773,7 +771,7 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
+
+ if (arch_is_coherent() || nommu()) {
+ __dma_free_buffer(page, size);
+- } else if (cpu_architecture() < CPU_ARCH_ARMv6) {
++ } else if (!IS_ENABLED(CONFIG_CMA)) {
+ __dma_free_remap(cpu_addr, size);
+ __dma_free_buffer(page, size);
+ } else {
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/arm-dma-mapping-use-alloc-mmap-free-from-dma_ops.patch b/patches.dma-mapping/arm-dma-mapping-use-alloc-mmap-free-from-dma_ops.patch
new file mode 100644
index 0000000000000..f293c2f12af49
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-use-alloc-mmap-free-from-dma_ops.patch
@@ -0,0 +1,340 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:55 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:44 +0900
+Subject: [PATCH v2 27/58] ARM: dma-mapping: use alloc, mmap, free from dma_ops
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-28-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+This patch converts the dma_alloc/free/mmap_{coherent,writecombine}
+functions to use the generic alloc/free/mmap methods from the dma_map_ops
+structure. A new DMA_ATTR_WRITE_COMBINE DMA attribute has been
+introduced to implement the writecombine methods.
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
+Acked-by: Arnd Bergmann <arnd@arndb.de>
+Tested-By: Subash Patel <subash.ramaswamy@linaro.org>
+(cherry picked from commit f99d60341238fe73fc514129cd9ae4e44e1b2c47)
+
+Conflicts:
+
+ arch/arm/mm/dma-mapping.c
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ arch/arm/common/dmabounce.c | 3 +
+ arch/arm/include/asm/dma-mapping.h | 107 ++++++++++++++++++++++++++----------
+ arch/arm/mm/dma-mapping.c | 61 ++++++++------------
+ 3 files changed, 104 insertions(+), 67 deletions(-)
+
+diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
+index 813c29d..9d7eb53 100644
+--- a/arch/arm/common/dmabounce.c
++++ b/arch/arm/common/dmabounce.c
+@@ -449,6 +449,9 @@ static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
+ }
+
+ static struct dma_map_ops dmabounce_ops = {
++ .alloc = arm_dma_alloc,
++ .free = arm_dma_free,
++ .mmap = arm_dma_mmap,
+ .map_page = dmabounce_map_page,
+ .unmap_page = dmabounce_unmap_page,
+ .sync_single_for_cpu = dmabounce_sync_for_cpu,
+diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
+index 7a7c3c7..bbef15d 100644
+--- a/arch/arm/include/asm/dma-mapping.h
++++ b/arch/arm/include/asm/dma-mapping.h
+@@ -5,6 +5,7 @@
+
+ #include <linux/mm_types.h>
+ #include <linux/scatterlist.h>
++#include <linux/dma-attrs.h>
+ #include <linux/dma-debug.h>
+
+ #include <asm-generic/dma-coherent.h>
+@@ -110,68 +111,115 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
+ extern int dma_supported(struct device *dev, u64 mask);
+
+ /**
+- * dma_alloc_coherent - allocate consistent memory for DMA
++ * arm_dma_alloc - allocate consistent memory for DMA
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @size: required memory size
+ * @handle: bus-specific DMA address
++ * @attrs: optinal attributes that specific mapping properties
+ *
+- * Allocate some uncached, unbuffered memory for a device for
+- * performing DMA. This function allocates pages, and will
+- * return the CPU-viewed address, and sets @handle to be the
+- * device-viewed address.
++ * Allocate some memory for a device for performing DMA. This function
++ * allocates pages, and will return the CPU-viewed address, and sets @handle
++ * to be the device-viewed address.
+ */
+-extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
++extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
++ gfp_t gfp, struct dma_attrs *attrs);
++
++#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
++
++static inline void *dma_alloc_attrs(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t flag,
++ struct dma_attrs *attrs)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ void *cpu_addr;
++ BUG_ON(!ops);
++
++ cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
++ debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
++ return cpu_addr;
++}
+
+ /**
+- * dma_free_coherent - free memory allocated by dma_alloc_coherent
++ * arm_dma_free - free memory allocated by arm_dma_alloc
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @size: size of memory originally requested in dma_alloc_coherent
+ * @cpu_addr: CPU-view address returned from dma_alloc_coherent
+ * @handle: device-view address returned from dma_alloc_coherent
++ * @attrs: optinal attributes that specific mapping properties
+ *
+ * Free (and unmap) a DMA buffer previously allocated by
+- * dma_alloc_coherent().
++ * arm_dma_alloc().
+ *
+ * References to memory and mappings associated with cpu_addr/handle
+ * during and after this call executing are illegal.
+ */
+-extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);
++extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
++ dma_addr_t handle, struct dma_attrs *attrs);
++
++#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
++
++static inline void dma_free_attrs(struct device *dev, size_t size,
++ void *cpu_addr, dma_addr_t dma_handle,
++ struct dma_attrs *attrs)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ BUG_ON(!ops);
++
++ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
++ ops->free(dev, size, cpu_addr, dma_handle, attrs);
++}
+
+ /**
+- * dma_mmap_coherent - map a coherent DMA allocation into user space
++ * arm_dma_mmap - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
+ * @handle: device-view address returned from dma_alloc_coherent
+ * @size: size of memory originally requested in dma_alloc_coherent
++ * @attrs: optinal attributes that specific mapping properties
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
+ * into user space. The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+-int dma_mmap_coherent(struct device *, struct vm_area_struct *,
+- void *, dma_addr_t, size_t);
++extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t dma_addr, size_t size,
++ struct dma_attrs *attrs);
+
++#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+
+-/**
+- * dma_alloc_writecombine - allocate writecombining memory for DMA
+- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+- * @size: required memory size
+- * @handle: bus-specific DMA address
+- *
+- * Allocate some uncached, buffered memory for a device for
+- * performing DMA. This function allocates pages, and will
+- * return the CPU-viewed address, and sets @handle to be the
+- * device-viewed address.
+- */
+-extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
+- gfp_t);
++static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t dma_addr,
++ size_t size, struct dma_attrs *attrs)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ BUG_ON(!ops);
++ return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
++}
+
+-#define dma_free_writecombine(dev,size,cpu_addr,handle) \
+- dma_free_coherent(dev,size,cpu_addr,handle)
++static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t flag)
++{
++ DEFINE_DMA_ATTRS(attrs);
++ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
++ return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
++}
+
+-int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
+- void *, dma_addr_t, size_t);
++static inline void dma_free_writecombine(struct device *dev, size_t size,
++ void *cpu_addr, dma_addr_t dma_handle)
++{
++ DEFINE_DMA_ATTRS(attrs);
++ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
++ return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
++}
++
++static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t dma_addr, size_t size)
++{
++ DEFINE_DMA_ATTRS(attrs);
++ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
++ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
++}
+
+ /*
+ * This can be called during boot to increase the size of the consistent
+@@ -180,7 +228,6 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
+ */
+ extern void __init init_consistent_dma_size(unsigned long size);
+
+-
+ /*
+ * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
+ * and utilize bounce buffers as needed to work around limited DMA windows.
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index f7fbbda..ee4cb48 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -118,6 +118,9 @@ static void arm_dma_sync_single_for_device(struct device *dev,
+ static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
+
+ struct dma_map_ops arm_dma_ops = {
++ .alloc = arm_dma_alloc,
++ .free = arm_dma_free,
++ .mmap = arm_dma_mmap,
+ .map_page = arm_dma_map_page,
+ .unmap_page = arm_dma_unmap_page,
+ .map_sg = arm_dma_map_sg,
+@@ -620,12 +623,21 @@ static void __free_from_contiguous(struct device *dev, struct page *page,
+ dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+ }
+
++static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
++{
++ prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
++ pgprot_writecombine(prot) :
++ pgprot_dmacoherent(prot);
++ return prot;
++}
++
+ #define nommu() 0
+
+ #else /* !CONFIG_MMU */
+
+ #define nommu() 1
+
++#define __get_dma_pgprot(attrs, prot) __pgprot(0)
+ #define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
+ #define __alloc_from_pool(dev, size, ret_page, c) NULL
+ #define __alloc_from_contiguous(dev, size, prot, ret) NULL
+@@ -702,39 +714,31 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
+ * Allocate DMA-coherent memory space and return both the kernel remapped
+ * virtual and bus address for that space.
+ */
+-void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle,
+- gfp_t gfp)
++void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
++ gfp_t gfp, struct dma_attrs *attrs)
+ {
++ pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
+ void *memory;
+
+ if (dma_alloc_from_coherent(dev, size, handle, &memory))
+ return memory;
+
+- return __dma_alloc(dev, size, handle, gfp,
+- pgprot_dmacoherent(pgprot_kernel),
++ return __dma_alloc(dev, size, handle, gfp, prot,
+ __builtin_return_address(0));
+ }
+-EXPORT_SYMBOL(dma_alloc_coherent);
+
+ /*
+- * Allocate a writecombining region, in much the same way as
+- * dma_alloc_coherent above.
++ * Create userspace mapping for the DMA-coherent memory.
+ */
+-void *
+-dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
+-{
+- return __dma_alloc(dev, size, handle, gfp,
+- pgprot_writecombine(pgprot_kernel),
+- __builtin_return_address(0));
+-}
+-EXPORT_SYMBOL(dma_alloc_writecombine);
+-
+-static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
+- void *cpu_addr, dma_addr_t dma_addr, size_t size)
++int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t dma_addr, size_t size,
++ struct dma_attrs *attrs)
+ {
+ int ret = -ENXIO;
+ #ifdef CONFIG_MMU
+ unsigned long pfn = dma_to_pfn(dev, dma_addr);
++ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
++
+ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
+@@ -747,27 +751,11 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ return ret;
+ }
+
+-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+- void *cpu_addr, dma_addr_t dma_addr, size_t size)
+-{
+- vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
+- return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
+-}
+-EXPORT_SYMBOL(dma_mmap_coherent);
+-
+-int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+- void *cpu_addr, dma_addr_t dma_addr, size_t size)
+-{
+- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+- return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
+-}
+-EXPORT_SYMBOL(dma_mmap_writecombine);
+-
+-
+ /*
+ * Free a buffer as defined by the above mapping.
+ */
+-void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
++void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
++ dma_addr_t handle, struct dma_attrs *attrs)
+ {
+ struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+
+@@ -791,7 +779,6 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
+ __free_from_contiguous(dev, page, size);
+ }
+ }
+-EXPORT_SYMBOL(dma_free_coherent);
+
+ static void dma_cache_maint_page(struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction dir,
+--
+1.7.5.4
+
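From a driver's point of view, the writecombine helpers above are now thin wrappers around dma_alloc_attrs()/dma_mmap_attrs() with DMA_ATTR_WRITE_COMBINE set. A hypothetical sketch of the unchanged driver-facing usage (the names and the 1 MiB size are illustrative only):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

#define EXAMPLE_BUF_SIZE	(1024 * 1024)	/* illustrative size */

static void *example_vaddr;
static dma_addr_t example_dma;

/* Allocate a write-combined DMA buffer, e.g. at probe time. */
static int example_alloc(struct device *dev)
{
	example_vaddr = dma_alloc_writecombine(dev, EXAMPLE_BUF_SIZE,
					       &example_dma, GFP_KERNEL);
	return example_vaddr ? 0 : -ENOMEM;
}

/* Expose the same buffer to user space from an mmap handler. */
static int example_mmap(struct device *dev, struct vm_area_struct *vma)
{
	return dma_mmap_writecombine(dev, vma, example_vaddr,
				     example_dma, EXAMPLE_BUF_SIZE);
}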
diff --git a/patches.dma-mapping/arm-dma-mapping-use-asm-generic-dma-mapping-common.h.patch b/patches.dma-mapping/arm-dma-mapping-use-asm-generic-dma-mapping-common.h.patch
new file mode 100644
index 0000000000000..286042a5b6b65
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-use-asm-generic-dma-mapping-common.h.patch
@@ -0,0 +1,534 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:45 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:40 +0900
+Subject: [PATCH v2 23/58] ARM: dma-mapping: use asm-generic/dma-mapping-common.h
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-24-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+This patch modifies the dma-mapping implementation on the ARM architecture
+to use the common dma_map_ops structure and the
+asm-generic/dma-mapping-common.h helpers.
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
+Tested-By: Subash Patel <subash.ramaswamy@linaro.org>
+(cherry picked from commit 2dc6a016bbedf18f18ad73997e5338307d6dbde9)
+
+Conflicts:
+
+ arch/arm/Kconfig
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ arch/arm/Kconfig | 1 +
+ arch/arm/include/asm/device.h | 1 +
+ arch/arm/include/asm/dma-mapping.h | 196 +++++-------------------------------
+ arch/arm/mm/dma-mapping.c | 148 ++++++++++++++++-----------
+ 4 files changed, 115 insertions(+), 231 deletions(-)
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index d06c359..9bc6663 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -4,6 +4,7 @@ config ARM
+ select HAVE_AOUT
+ select HAVE_DMA_API_DEBUG
+ select HAVE_IDE if PCI || ISA || PCMCIA
++ select HAVE_DMA_ATTRS
+ select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
+ select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
+ select HAVE_MEMBLOCK
+diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
+index 7aa3680..6e2cb0e 100644
+--- a/arch/arm/include/asm/device.h
++++ b/arch/arm/include/asm/device.h
+@@ -7,6 +7,7 @@
+ #define ASMARM_DEVICE_H
+
+ struct dev_archdata {
++ struct dma_map_ops *dma_ops;
+ #ifdef CONFIG_DMABOUNCE
+ struct dmabounce_device_info *dmabounce;
+ #endif
+diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
+index eeddbe2..6725a08 100644
+--- a/arch/arm/include/asm/dma-mapping.h
++++ b/arch/arm/include/asm/dma-mapping.h
+@@ -11,6 +11,27 @@
+ #include <asm/memory.h>
+
+ #define DMA_ERROR_CODE (~0)
++extern struct dma_map_ops arm_dma_ops;
++
++static inline struct dma_map_ops *get_dma_ops(struct device *dev)
++{
++ if (dev && dev->archdata.dma_ops)
++ return dev->archdata.dma_ops;
++ return &arm_dma_ops;
++}
++
++static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
++{
++ BUG_ON(!dev);
++ dev->archdata.dma_ops = ops;
++}
++
++#include <asm-generic/dma-mapping-common.h>
++
++static inline int dma_set_mask(struct device *dev, u64 mask)
++{
++ return get_dma_ops(dev)->set_dma_mask(dev, mask);
++}
+
+ #ifdef __arch_page_to_dma
+ #error Please update to __arch_pfn_to_dma
+@@ -119,7 +140,6 @@ static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
+
+ extern int dma_supported(struct device *, u64);
+ extern int dma_set_mask(struct device *, u64);
+-
+ /*
+ * DMA errors are defined by all-bits-set in the DMA address.
+ */
+@@ -297,179 +317,17 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
+ }
+ #endif /* CONFIG_DMABOUNCE */
+
+-/**
+- * dma_map_single - map a single buffer for streaming DMA
+- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+- * @cpu_addr: CPU direct mapped address of buffer
+- * @size: size of buffer to map
+- * @dir: DMA transfer direction
+- *
+- * Ensure that any data held in the cache is appropriately discarded
+- * or written back.
+- *
+- * The device owns this memory once this call has completed. The CPU
+- * can regain ownership by calling dma_unmap_single() or
+- * dma_sync_single_for_cpu().
+- */
+-static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
+- size_t size, enum dma_data_direction dir)
+-{
+- unsigned long offset;
+- struct page *page;
+- dma_addr_t addr;
+-
+- BUG_ON(!virt_addr_valid(cpu_addr));
+- BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
+- BUG_ON(!valid_dma_direction(dir));
+-
+- page = virt_to_page(cpu_addr);
+- offset = (unsigned long)cpu_addr & ~PAGE_MASK;
+- addr = __dma_map_page(dev, page, offset, size, dir);
+- debug_dma_map_page(dev, page, offset, size, dir, addr, true);
+-
+- return addr;
+-}
+-
+-/**
+- * dma_map_page - map a portion of a page for streaming DMA
+- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+- * @page: page that buffer resides in
+- * @offset: offset into page for start of buffer
+- * @size: size of buffer to map
+- * @dir: DMA transfer direction
+- *
+- * Ensure that any data held in the cache is appropriately discarded
+- * or written back.
+- *
+- * The device owns this memory once this call has completed. The CPU
+- * can regain ownership by calling dma_unmap_page().
+- */
+-static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+- unsigned long offset, size_t size, enum dma_data_direction dir)
+-{
+- dma_addr_t addr;
+-
+- BUG_ON(!valid_dma_direction(dir));
+-
+- addr = __dma_map_page(dev, page, offset, size, dir);
+- debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+-
+- return addr;
+-}
+-
+-/**
+- * dma_unmap_single - unmap a single buffer previously mapped
+- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+- * @handle: DMA address of buffer
+- * @size: size of buffer (same as passed to dma_map_single)
+- * @dir: DMA transfer direction (same as passed to dma_map_single)
+- *
+- * Unmap a single streaming mode DMA translation. The handle and size
+- * must match what was provided in the previous dma_map_single() call.
+- * All other usages are undefined.
+- *
+- * After this call, reads by the CPU to the buffer are guaranteed to see
+- * whatever the device wrote there.
+- */
+-static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
+- size_t size, enum dma_data_direction dir)
+-{
+- debug_dma_unmap_page(dev, handle, size, dir, true);
+- __dma_unmap_page(dev, handle, size, dir);
+-}
+-
+-/**
+- * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
+- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+- * @handle: DMA address of buffer
+- * @size: size of buffer (same as passed to dma_map_page)
+- * @dir: DMA transfer direction (same as passed to dma_map_page)
+- *
+- * Unmap a page streaming mode DMA translation. The handle and size
+- * must match what was provided in the previous dma_map_page() call.
+- * All other usages are undefined.
+- *
+- * After this call, reads by the CPU to the buffer are guaranteed to see
+- * whatever the device wrote there.
+- */
+-static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
+- size_t size, enum dma_data_direction dir)
+-{
+- debug_dma_unmap_page(dev, handle, size, dir, false);
+- __dma_unmap_page(dev, handle, size, dir);
+-}
+-
+-
+-static inline void dma_sync_single_for_cpu(struct device *dev,
+- dma_addr_t handle, size_t size, enum dma_data_direction dir)
+-{
+- BUG_ON(!valid_dma_direction(dir));
+-
+- debug_dma_sync_single_for_cpu(dev, handle, size, dir);
+-
+- if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
+- return;
+-
+- __dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
+-}
+-
+-static inline void dma_sync_single_for_device(struct device *dev,
+- dma_addr_t handle, size_t size, enum dma_data_direction dir)
+-{
+- BUG_ON(!valid_dma_direction(dir));
+-
+- debug_dma_sync_single_for_device(dev, handle, size, dir);
+-
+- if (!dmabounce_sync_for_device(dev, handle, size, dir))
+- return;
+-
+- __dma_single_cpu_to_dev(dma_to_virt(dev, handle), size, dir);
+-}
+-
+-/**
+- * dma_sync_single_range_for_cpu
+- * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+- * @handle: DMA address of buffer
+- * @offset: offset of region to start sync
+- * @size: size of region to sync
+- * @dir: DMA transfer direction (same as passed to dma_map_single)
+- *
+- * Make physical memory consistent for a single streaming mode DMA
+- * translation after a transfer.
+- *
+- * If you perform a dma_map_single() but wish to interrogate the
+- * buffer using the cpu, yet do not wish to teardown the PCI dma
+- * mapping, you must call this function before doing so. At the
+- * next point you give the PCI dma address back to the card, you
+- * must first the perform a dma_sync_for_device, and then the
+- * device again owns the buffer.
+- */
+-static inline void dma_sync_single_range_for_cpu(struct device *dev,
+- dma_addr_t handle, unsigned long offset, size_t size,
+- enum dma_data_direction dir)
+-{
+- dma_sync_single_for_cpu(dev, handle + offset, size, dir);
+-}
+-
+-static inline void dma_sync_single_range_for_device(struct device *dev,
+- dma_addr_t handle, unsigned long offset, size_t size,
+- enum dma_data_direction dir)
+-{
+- dma_sync_single_for_device(dev, handle + offset, size, dir);
+-}
+-
+ /*
+ * The scatter list versions of the above methods.
+ */
+-extern int dma_map_sg(struct device *, struct scatterlist *, int,
+- enum dma_data_direction);
+-extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
++extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
++ enum dma_data_direction, struct dma_attrs *attrs);
++extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
++ enum dma_data_direction, struct dma_attrs *attrs);
++extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
+ enum dma_data_direction);
+-extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
++extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
+ enum dma_data_direction);
+-extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
+- enum dma_data_direction);
+-
+
+ #endif /* __KERNEL__ */
+ #endif
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index c17c353..4a1f336 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -34,6 +34,85 @@
+
+ #include "mm.h"
+
++/**
++ * arm_dma_map_page - map a portion of a page for streaming DMA
++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
++ * @page: page that buffer resides in
++ * @offset: offset into page for start of buffer
++ * @size: size of buffer to map
++ * @dir: DMA transfer direction
++ *
++ * Ensure that any data held in the cache is appropriately discarded
++ * or written back.
++ *
++ * The device owns this memory once this call has completed. The CPU
++ * can regain ownership by calling dma_unmap_page().
++ */
++static inline dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
++ unsigned long offset, size_t size, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ return __dma_map_page(dev, page, offset, size, dir);
++}
++
++/**
++ * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
++ * @handle: DMA address of buffer
++ * @size: size of buffer (same as passed to dma_map_page)
++ * @dir: DMA transfer direction (same as passed to dma_map_page)
++ *
++ * Unmap a page streaming mode DMA translation. The handle and size
++ * must match what was provided in the previous dma_map_page() call.
++ * All other usages are undefined.
++ *
++ * After this call, reads by the CPU to the buffer are guaranteed to see
++ * whatever the device wrote there.
++ */
++static inline void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
++ size_t size, enum dma_data_direction dir,
++ struct dma_attrs *attrs)
++{
++ __dma_unmap_page(dev, handle, size, dir);
++}
++
++static inline void arm_dma_sync_single_for_cpu(struct device *dev,
++ dma_addr_t handle, size_t size, enum dma_data_direction dir)
++{
++ unsigned int offset = handle & (PAGE_SIZE - 1);
++ struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
++ if (!dmabounce_sync_for_cpu(dev, handle, size, dir))
++ return;
++
++ __dma_page_dev_to_cpu(page, offset, size, dir);
++}
++
++static inline void arm_dma_sync_single_for_device(struct device *dev,
++ dma_addr_t handle, size_t size, enum dma_data_direction dir)
++{
++ unsigned int offset = handle & (PAGE_SIZE - 1);
++ struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
++ if (!dmabounce_sync_for_device(dev, handle, size, dir))
++ return;
++
++ __dma_page_cpu_to_dev(page, offset, size, dir);
++}
++
++static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
++
++struct dma_map_ops arm_dma_ops = {
++ .map_page = arm_dma_map_page,
++ .unmap_page = arm_dma_unmap_page,
++ .map_sg = arm_dma_map_sg,
++ .unmap_sg = arm_dma_unmap_sg,
++ .sync_single_for_cpu = arm_dma_sync_single_for_cpu,
++ .sync_single_for_device = arm_dma_sync_single_for_device,
++ .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
++ .sync_sg_for_device = arm_dma_sync_sg_for_device,
++ .set_dma_mask = arm_dma_set_mask,
++};
++EXPORT_SYMBOL(arm_dma_ops);
++
+ static u64 get_coherent_dma_mask(struct device *dev)
+ {
+ u64 mask = (u64)arm_dma_limit;
+@@ -697,47 +776,6 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
+ }
+ EXPORT_SYMBOL(dma_free_coherent);
+
+-/*
+- * Make an area consistent for devices.
+- * Note: Drivers should NOT use this function directly, as it will break
+- * platforms with CONFIG_DMABOUNCE.
+- * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+- */
+-void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
+- enum dma_data_direction dir)
+-{
+- unsigned long paddr;
+-
+- BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
+-
+- dmac_map_area(kaddr, size, dir);
+-
+- paddr = __pa(kaddr);
+- if (dir == DMA_FROM_DEVICE) {
+- outer_inv_range(paddr, paddr + size);
+- } else {
+- outer_clean_range(paddr, paddr + size);
+- }
+- /* FIXME: non-speculating: flush on bidirectional mappings? */
+-}
+-EXPORT_SYMBOL(___dma_single_cpu_to_dev);
+-
+-void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
+- enum dma_data_direction dir)
+-{
+- BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
+-
+- /* FIXME: non-speculating: not required */
+- /* don't bother invalidating if DMA to device */
+- if (dir != DMA_TO_DEVICE) {
+- unsigned long paddr = __pa(kaddr);
+- outer_inv_range(paddr, paddr + size);
+- }
+-
+- dmac_unmap_area(kaddr, size, dir);
+-}
+-EXPORT_SYMBOL(___dma_single_dev_to_cpu);
+-
+ static void dma_cache_maint_page(struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction dir,
+ void (*op)(const void *, size_t, int))
+@@ -835,21 +873,18 @@ EXPORT_SYMBOL(___dma_page_dev_to_cpu);
+ * Device ownership issues as mentioned for dma_map_single are the same
+ * here.
+ */
+-int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+- enum dma_data_direction dir)
++int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
++ enum dma_data_direction dir, struct dma_attrs *attrs)
+ {
+ struct scatterlist *s;
+ int i, j;
+
+- BUG_ON(!valid_dma_direction(dir));
+-
+ for_each_sg(sg, s, nents, i) {
+ s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
+ s->length, dir);
+ if (dma_mapping_error(dev, s->dma_address))
+ goto bad_mapping;
+ }
+- debug_dma_map_sg(dev, sg, nents, nents, dir);
+ return nents;
+
+ bad_mapping:
+@@ -857,7 +892,6 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+ return 0;
+ }
+-EXPORT_SYMBOL(dma_map_sg);
+
+ /**
+ * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
+@@ -869,18 +903,15 @@ EXPORT_SYMBOL(dma_map_sg);
+ * Unmap a set of streaming mode DMA translations. Again, CPU access
+ * rules concerning calls here are the same as for dma_unmap_single().
+ */
+-void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
+- enum dma_data_direction dir)
++void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
++ enum dma_data_direction dir, struct dma_attrs *attrs)
+ {
+ struct scatterlist *s;
+ int i;
+
+- debug_dma_unmap_sg(dev, sg, nents, dir);
+-
+ for_each_sg(sg, s, nents, i)
+ __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+ }
+-EXPORT_SYMBOL(dma_unmap_sg);
+
+ /**
+ * dma_sync_sg_for_cpu
+@@ -889,7 +920,7 @@ EXPORT_SYMBOL(dma_unmap_sg);
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+-void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
++void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+ {
+ struct scatterlist *s;
+@@ -903,10 +934,7 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ __dma_page_dev_to_cpu(sg_page(s), s->offset,
+ s->length, dir);
+ }
+-
+- debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
+ }
+-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
+
+ /**
+ * dma_sync_sg_for_device
+@@ -915,7 +943,7 @@ EXPORT_SYMBOL(dma_sync_sg_for_cpu);
+ * @nents: number of buffers to map (returned from dma_map_sg)
+ * @dir: DMA transfer direction (same as was passed to dma_map_sg)
+ */
+-void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
++void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
+ {
+ struct scatterlist *s;
+@@ -929,10 +957,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ __dma_page_cpu_to_dev(sg_page(s), s->offset,
+ s->length, dir);
+ }
+-
+- debug_dma_sync_sg_for_device(dev, sg, nents, dir);
+ }
+-EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+ /*
+ * Return whether the given device DMA address mask can be supported
+@@ -948,7 +973,7 @@ int dma_supported(struct device *dev, u64 mask)
+ }
+ EXPORT_SYMBOL(dma_supported);
+
+-int dma_set_mask(struct device *dev, u64 dma_mask)
++static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
+ {
+ if (!dev->dma_mask || !dma_supported(dev, dma_mask))
+ return -EIO;
+@@ -959,7 +984,6 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
+
+ return 0;
+ }
+-EXPORT_SYMBOL(dma_set_mask);
+
+ #define PREALLOC_DMA_DEBUG_ENTRIES 4096
+
+--
+1.7.5.4
+
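With the exported dma_map_sg()/dma_sync_*() entry points removed above, callers reach the ARM implementations through the per-device dma_map_ops table instead. A condensed sketch of that indirection, in the style of the asm-generic dma-mapping-common wrappers (not the verbatim header, which also runs the dma-debug hooks dropped from the ARM code in the hunks above):

    static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                          unsigned long offset, size_t size,
                                          enum dma_data_direction dir)
    {
            struct dma_map_ops *ops = get_dma_ops(dev);  /* arm_dma_ops, or a dmabounce/IOMMU variant */

            BUG_ON(!valid_dma_direction(dir));
            /* on ARM this dispatches to arm_dma_map_page() via .map_page */
            return ops->map_page(dev, page, offset, size, dir, NULL);
    }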
diff --git a/patches.dma-mapping/arm-dma-mapping-use-dma_mmap_from_coherent.patch b/patches.dma-mapping/arm-dma-mapping-use-dma_mmap_from_coherent.patch
new file mode 100644
index 0000000000000..0f31074adb739
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-use-dma_mmap_from_coherent.patch
@@ -0,0 +1,41 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:33 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:36 +0900
+Subject: [PATCH v2 19/58] ARM: dma-mapping: use dma_mmap_from_coherent()
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-20-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit 47142f07eea32e9c108f548a4b06c28bec7df6e4)
+
+Conflicts:
+
+ arch/arm/mm/dma-mapping.c
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ arch/arm/mm/dma-mapping.c | 3 +++
+ 1 files changed, 3 insertions(+), 0 deletions(-)
+
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 153f555..210dd10 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -639,6 +639,9 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ int ret = -ENXIO;
+ #ifdef CONFIG_MMU
+ unsigned long pfn = dma_to_pfn(dev, dma_addr);
++ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
++ return ret;
++
+ ret = remap_pfn_range(vma, vma->vm_start,
+ pfn + vma->vm_pgoff,
+ vma->vm_end - vma->vm_start,
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/arm-dma-mapping-use-pmd-size-for-section-unmap.patch b/patches.dma-mapping/arm-dma-mapping-use-pmd-size-for-section-unmap.patch
new file mode 100644
index 0000000000000..1def680314e70
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-use-pmd-size-for-section-unmap.patch
@@ -0,0 +1,49 @@
+From ltsi-dev-bounces@lists.linuxfoundation.org Mon Oct 29 00:52:30 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:34 +0900
+Subject: [PATCH v2 17/58] ARM: dma-mapping: use PMD size for section unmap
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org
+Message-ID: <1351497075-32717-18-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Vitaly Andrianov <vitalya@ti.com>
+
+The dma_contiguous_remap() function clears existing section maps using
+the wrong size (PGDIR_SIZE instead of PMD_SIZE). This is a bug which
+does not affect non-LPAE systems, where PGDIR_SIZE and PMD_SIZE are the same.
+On LPAE systems, however, this bug causes the kernel to hang at this point.
+
+This fix has been tested on both LPAE and non-LPAE kernel builds.
+
+Signed-off-by: Vitaly Andrianov <vitalya@ti.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit 61f6c7a47a2f84b7ba4b65240ffe9247df772b06)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ arch/arm/mm/dma-mapping.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 302f5bf..153f555 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -295,7 +295,7 @@ void __init dma_contiguous_remap(void)
+ * Clear previous low-memory mapping
+ */
+ for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
+- addr += PGDIR_SIZE)
++ addr += PMD_SIZE)
+ pmd_clear(pmd_off_k(addr));
+
+ iotable_init(&map, 1);
+--
+1.7.5.4
+
+_______________________________________________
+LTSI-dev mailing list
+LTSI-dev@lists.linuxfoundation.org
+https://lists.linuxfoundation.org/mailman/listinfo/ltsi-dev
+
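The size mismatch described above only bites with LPAE: with the classic 2-level ARM page tables PGDIR_SIZE and PMD_SIZE are both 2 MiB, while the 3-level LPAE layout keeps PMD_SIZE at 2 MiB but grows PGDIR_SIZE to 1 GiB, so stepping by PGDIR_SIZE leaves most section entries uncleared. A standalone arithmetic sketch (the 2 MiB/1 GiB figures are the usual ARM values, not taken from this patch):

    #include <stdio.h>

    #define SZ_2M (2UL << 20)
    #define SZ_1G (1UL << 30)

    int main(void)
    {
            unsigned long pmd_size      = SZ_2M;  /* both configurations */
            unsigned long pgdir_classic = SZ_2M;  /* non-LPAE: PGDIR_SIZE == PMD_SIZE */
            unsigned long pgdir_lpae    = SZ_1G;  /* LPAE: one pgd entry covers 512 PMDs */

            printf("PMD entries skipped per step, non-LPAE: %lu\n",
                   pgdir_classic / pmd_size - 1);  /* 0: the old loop was accidentally correct */
            printf("PMD entries skipped per step, LPAE:     %lu\n",
                   pgdir_lpae / pmd_size - 1);     /* 511 stale mappings left behind */
            return 0;
    }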
diff --git a/patches.dma-mapping/arm-dma-mapping-use-pr_-instread-of-printk.patch b/patches.dma-mapping/arm-dma-mapping-use-pr_-instread-of-printk.patch
new file mode 100644
index 0000000000000..09287efa8c46a
--- /dev/null
+++ b/patches.dma-mapping/arm-dma-mapping-use-pr_-instread-of-printk.patch
@@ -0,0 +1,95 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:36 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:37 +0900
+Subject: [PATCH v2 20/58] ARM: dma-mapping: use pr_* instread of printk
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-21-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+Replace all calls to printk with pr_* functions family.
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
+Acked-by: Arnd Bergmann <arnd@arndb.de>
+Tested-By: Subash Patel <subash.ramaswamy@linaro.org>
+(cherry picked from commit 6b6f770b573903f8a7d1cfab1fc662685653f413)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ arch/arm/mm/dma-mapping.c | 16 ++++++++--------
+ 1 files changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 210dd10..bb19804 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -181,14 +181,14 @@ static int __init consistent_init(void)
+
+ pud = pud_alloc(&init_mm, pgd, base);
+ if (!pud) {
+- printk(KERN_ERR "%s: no pud tables\n", __func__);
++ pr_err("%s: no pud tables\n", __func__);
+ ret = -ENOMEM;
+ break;
+ }
+
+ pmd = pmd_alloc(&init_mm, pud, base);
+ if (!pmd) {
+- printk(KERN_ERR "%s: no pmd tables\n", __func__);
++ pr_err("%s: no pmd tables\n", __func__);
+ ret = -ENOMEM;
+ break;
+ }
+@@ -196,7 +196,7 @@ static int __init consistent_init(void)
+
+ pte = pte_alloc_kernel(pmd, base);
+ if (!pte) {
+- printk(KERN_ERR "%s: no pte tables\n", __func__);
++ pr_err("%s: no pte tables\n", __func__);
+ ret = -ENOMEM;
+ break;
+ }
+@@ -311,7 +311,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
+ int bit;
+
+ if (!consistent_pte) {
+- printk(KERN_ERR "%s: not initialised\n", __func__);
++ pr_err("%s: not initialised\n", __func__);
+ dump_stack();
+ return NULL;
+ }
+@@ -370,14 +370,14 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
+
+ c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
+ if (!c) {
+- printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
++ pr_err("%s: trying to free invalid coherent area: %p\n",
+ __func__, cpu_addr);
+ dump_stack();
+ return;
+ }
+
+ if ((c->vm_end - c->vm_start) != size) {
+- printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
++ pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
+ __func__, c->vm_end - c->vm_start, size);
+ dump_stack();
+ size = c->vm_end - c->vm_start;
+@@ -399,8 +399,8 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
+ }
+
+ if (pte_none(pte) || !pte_present(pte))
+- printk(KERN_CRIT "%s: bad page in kernel page table\n",
+- __func__);
++ pr_crit("%s: bad page in kernel page table\n",
++ __func__);
+ } while (size -= PAGE_SIZE);
+
+ flush_tlb_kernel_range(c->vm_start, c->vm_end);
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/arm-fix-warning-caused-by-wrongly-typed-arm_dma_limit.patch b/patches.dma-mapping/arm-fix-warning-caused-by-wrongly-typed-arm_dma_limit.patch
new file mode 100644
index 0000000000000..524265fb21557
--- /dev/null
+++ b/patches.dma-mapping/arm-fix-warning-caused-by-wrongly-typed-arm_dma_limit.patch
@@ -0,0 +1,37 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:52 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:51:06 +0900
+Subject: [PATCH v2 49/58] ARM: fix warning caused by wrongly typed arm_dma_limit
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-50-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+arch/arm/mm/init.c: In function 'arm_memblock_init':
+arch/arm/mm/init.c:380: warning: comparison of distinct pointer types lacks a cast
+
+Fix this by correcting the typecast in its definition when ZONE_DMA is disabled.
+This was missed in 4986e5c7c (ARM: mm: fix type of the arm_dma_limit
+global variable).
+
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+(cherry picked from commit 09b2ad13da3ac7c717dd86bfca7072d9b36f7449)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+---
+ arch/arm/mm/mm.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/mm/mm.h
++++ b/arch/arm/mm/mm.h
+@@ -70,7 +70,7 @@ extern void __flush_dcache_page(struct a
+ #ifdef CONFIG_ZONE_DMA
+ extern phys_addr_t arm_dma_limit;
+ #else
+-#define arm_dma_limit ((u32)~0)
++#define arm_dma_limit ((phys_addr_t)~0)
+ #endif
+
+ extern phys_addr_t arm_lowmem_limit;
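The warning quoted above comes from the kernel's type-checked min() macro: min(arm_dma_limit, arm_lowmem_limit), added by the CMA patch later in this series, compares pointers to temporaries of each operand's type, so a u32 fallback #define mixed with a phys_addr_t (64-bit under LPAE) operand trips "comparison of distinct pointer types lacks a cast". A minimal userspace reproduction of the idiom (the macro mimics the kernel's min(), it is not copied verbatim):

    #include <stdint.h>
    #include <stdio.h>

    /* same trick the kernel's min() uses to insist on matching operand types */
    #define checked_min(x, y) ({                    \
            typeof(x) _min1 = (x);                  \
            typeof(y) _min2 = (y);                  \
            (void) (&_min1 == &_min2);              \
            _min1 < _min2 ? _min1 : _min2; })

    int main(void)
    {
            uint32_t dma_limit    = ~0U;             /* the old ((u32)~0) fallback */
            uint64_t lowmem_limit = 0x100000000ULL;  /* phys_addr_t is 64-bit with LPAE */

            /* gcc: warning: comparison of distinct pointer types lacks a cast */
            printf("%llu\n", (unsigned long long)checked_min(dma_limit, lowmem_limit));
            return 0;
    }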
diff --git a/patches.dma-mapping/arm-integrate-cma-with-dma-mapping-subsystem.patch b/patches.dma-mapping/arm-integrate-cma-with-dma-mapping-subsystem.patch
new file mode 100644
index 0000000000000..e9394deb72e2e
--- /dev/null
+++ b/patches.dma-mapping/arm-integrate-cma-with-dma-mapping-subsystem.patch
@@ -0,0 +1,793 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:24 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:32 +0900
+Subject: [PATCH v2 15/58] ARM: integrate CMA with DMA-mapping subsystem
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-16-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+This patch adds support for CMA to dma-mapping subsystem for ARM
+architecture. By default a global CMA area is used, but specific devices
+are allowed to have their private memory areas if required (they can be
+created with dma_declare_contiguous() function during board
+initialisation).
+
+Contiguous memory areas reserved for DMA are remapped with 2-level page
+tables on boot. Once a buffer is requested, a low memory kernel mapping
+is updated to match requested memory access type.
+
+GFP_ATOMIC allocations are performed from special pool which is created
+early during boot. This way remapping page attributes is not needed on
+allocation time.
+
+CMA has been enabled unconditionally for ARMv6+ systems.
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
+CC: Michal Nazarewicz <mina86@mina86.com>
+Acked-by: Arnd Bergmann <arnd@arndb.de>
+Tested-by: Rob Clark <rob.clark@linaro.org>
+Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
+Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+Tested-by: Robert Nelson <robertcnelson@gmail.com>
+Tested-by: Barry Song <Baohua.Song@csr.com>
+(cherry picked from commit c79095092834a18ae74cfc08def1a5a101dc106c)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ Documentation/kernel-parameters.txt | 4
+ arch/arm/Kconfig | 2
+ arch/arm/include/asm/dma-contiguous.h | 15 +
+ arch/arm/include/asm/mach/map.h | 1
+ arch/arm/kernel/setup.c | 9
+ arch/arm/mm/dma-mapping.c | 370 +++++++++++++++++++++++++++-------
+ arch/arm/mm/init.c | 23 +-
+ arch/arm/mm/mm.h | 3
+ arch/arm/mm/mmu.c | 31 +-
+ 9 files changed, 370 insertions(+), 88 deletions(-)
+ create mode 100644 arch/arm/include/asm/dma-contiguous.h
+
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -520,6 +520,10 @@ bytes respectively. Such letter suffixes
+ a hypervisor.
+ Default: yes
+
++ coherent_pool=nn[KMG] [ARM,KNL]
++ Sets the size of memory pool for coherent, atomic dma
++ allocations if Contiguous Memory Allocator (CMA) is used.
++
+ code_bytes [X86] How many bytes of object code to print
+ in an oops report.
+ Range: 0 - 8192
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -4,6 +4,8 @@ config ARM
+ select HAVE_AOUT
+ select HAVE_DMA_API_DEBUG
+ select HAVE_IDE if PCI || ISA || PCMCIA
++ select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
++ select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
+ select HAVE_MEMBLOCK
+ select RTC_LIB
+ select SYS_SUPPORTS_APM_EMULATION
+--- /dev/null
++++ b/arch/arm/include/asm/dma-contiguous.h
+@@ -0,0 +1,15 @@
++#ifndef ASMARM_DMA_CONTIGUOUS_H
++#define ASMARM_DMA_CONTIGUOUS_H
++
++#ifdef __KERNEL__
++#ifdef CONFIG_CMA
++
++#include <linux/types.h>
++#include <asm-generic/dma-contiguous.h>
++
++void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
++
++#endif
++#endif
++
++#endif
+--- a/arch/arm/include/asm/mach/map.h
++++ b/arch/arm/include/asm/mach/map.h
+@@ -30,6 +30,7 @@ struct map_desc {
+ #define MT_MEMORY_DTCM 12
+ #define MT_MEMORY_ITCM 13
+ #define MT_MEMORY_SO 14
++#define MT_MEMORY_DMA_READY 15
+
+ #ifdef CONFIG_MMU
+ extern void iotable_init(struct map_desc *, int);
+--- a/arch/arm/kernel/setup.c
++++ b/arch/arm/kernel/setup.c
+@@ -81,6 +81,7 @@ __setup("fpe=", fpe_setup);
+ extern void paging_init(struct machine_desc *desc);
+ extern void sanity_check_meminfo(void);
+ extern void reboot_setup(char *str);
++extern void setup_dma_zone(struct machine_desc *desc);
+
+ unsigned int processor_id;
+ EXPORT_SYMBOL(processor_id);
+@@ -939,12 +940,8 @@ void __init setup_arch(char **cmdline_p)
+ machine_desc = mdesc;
+ machine_name = mdesc->name;
+
+-#ifdef CONFIG_ZONE_DMA
+- if (mdesc->dma_zone_size) {
+- extern unsigned long arm_dma_zone_size;
+- arm_dma_zone_size = mdesc->dma_zone_size;
+- }
+-#endif
++ setup_dma_zone(mdesc);
++
+ if (mdesc->restart_mode)
+ reboot_setup(&mdesc->restart_mode);
+
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -17,7 +17,9 @@
+ #include <linux/init.h>
+ #include <linux/device.h>
+ #include <linux/dma-mapping.h>
++#include <linux/dma-contiguous.h>
+ #include <linux/highmem.h>
++#include <linux/memblock.h>
+ #include <linux/slab.h>
+
+ #include <asm/memory.h>
+@@ -26,6 +28,9 @@
+ #include <asm/tlbflush.h>
+ #include <asm/sizes.h>
+ #include <asm/mach/arch.h>
++#include <asm/mach/map.h>
++#include <asm/system_info.h>
++#include <asm/dma-contiguous.h>
+
+ #include "mm.h"
+
+@@ -56,6 +61,19 @@ static u64 get_coherent_dma_mask(struct
+ return mask;
+ }
+
++static void __dma_clear_buffer(struct page *page, size_t size)
++{
++ void *ptr;
++ /*
++ * Ensure that the allocated pages are zeroed, and that any data
++ * lurking in the kernel direct-mapped region is invalidated.
++ */
++ ptr = page_address(page);
++ memset(ptr, 0, size);
++ dmac_flush_range(ptr, ptr + size);
++ outer_flush_range(__pa(ptr), __pa(ptr) + size);
++}
++
+ /*
+ * Allocate a DMA buffer for 'dev' of size 'size' using the
+ * specified gfp mask. Note that 'size' must be page aligned.
+@@ -64,23 +82,6 @@ static struct page *__dma_alloc_buffer(s
+ {
+ unsigned long order = get_order(size);
+ struct page *page, *p, *e;
+- void *ptr;
+- u64 mask = get_coherent_dma_mask(dev);
+-
+-#ifdef CONFIG_DMA_API_DEBUG
+- u64 limit = (mask + 1) & ~mask;
+- if (limit && size >= limit) {
+- dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
+- size, mask);
+- return NULL;
+- }
+-#endif
+-
+- if (!mask)
+- return NULL;
+-
+- if (mask < 0xffffffffULL)
+- gfp |= GFP_DMA;
+
+ page = alloc_pages(gfp, order);
+ if (!page)
+@@ -93,14 +94,7 @@ static struct page *__dma_alloc_buffer(s
+ for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
+ __free_page(p);
+
+- /*
+- * Ensure that the allocated pages are zeroed, and that any data
+- * lurking in the kernel direct-mapped region is invalidated.
+- */
+- ptr = page_address(page);
+- memset(ptr, 0, size);
+- dmac_flush_range(ptr, ptr + size);
+- outer_flush_range(__pa(ptr), __pa(ptr) + size);
++ __dma_clear_buffer(page, size);
+
+ return page;
+ }
+@@ -170,6 +164,9 @@ static int __init consistent_init(void)
+ unsigned long base = consistent_base;
+ unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
+
++ if (cpu_architecture() >= CPU_ARCH_ARMv6)
++ return 0;
++
+ consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
+ if (!consistent_pte) {
+ pr_err("%s: no memory\n", __func__);
+@@ -210,9 +207,101 @@ static int __init consistent_init(void)
+
+ return ret;
+ }
+-
+ core_initcall(consistent_init);
+
++static void *__alloc_from_contiguous(struct device *dev, size_t size,
++ pgprot_t prot, struct page **ret_page);
++
++static struct arm_vmregion_head coherent_head = {
++ .vm_lock = __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock),
++ .vm_list = LIST_HEAD_INIT(coherent_head.vm_list),
++};
++
++size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
++
++static int __init early_coherent_pool(char *p)
++{
++ coherent_pool_size = memparse(p, &p);
++ return 0;
++}
++early_param("coherent_pool", early_coherent_pool);
++
++/*
++ * Initialise the coherent pool for atomic allocations.
++ */
++static int __init coherent_init(void)
++{
++ pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
++ size_t size = coherent_pool_size;
++ struct page *page;
++ void *ptr;
++
++ if (cpu_architecture() < CPU_ARCH_ARMv6)
++ return 0;
++
++ ptr = __alloc_from_contiguous(NULL, size, prot, &page);
++ if (ptr) {
++ coherent_head.vm_start = (unsigned long) ptr;
++ coherent_head.vm_end = (unsigned long) ptr + size;
++ printk(KERN_INFO "DMA: preallocated %u KiB pool for atomic coherent allocations\n",
++ (unsigned)size / 1024);
++ return 0;
++ }
++ printk(KERN_ERR "DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
++ (unsigned)size / 1024);
++ return -ENOMEM;
++}
++/*
++ * CMA is activated by core_initcall, so we must be called after it.
++ */
++postcore_initcall(coherent_init);
++
++struct dma_contig_early_reserve {
++ phys_addr_t base;
++ unsigned long size;
++};
++
++static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
++
++static int dma_mmu_remap_num __initdata;
++
++void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
++{
++ dma_mmu_remap[dma_mmu_remap_num].base = base;
++ dma_mmu_remap[dma_mmu_remap_num].size = size;
++ dma_mmu_remap_num++;
++}
++
++void __init dma_contiguous_remap(void)
++{
++ int i;
++ for (i = 0; i < dma_mmu_remap_num; i++) {
++ phys_addr_t start = dma_mmu_remap[i].base;
++ phys_addr_t end = start + dma_mmu_remap[i].size;
++ struct map_desc map;
++ unsigned long addr;
++
++ if (end > arm_lowmem_limit)
++ end = arm_lowmem_limit;
++ if (start >= end)
++ return;
++
++ map.pfn = __phys_to_pfn(start);
++ map.virtual = __phys_to_virt(start);
++ map.length = end - start;
++ map.type = MT_MEMORY_DMA_READY;
++
++ /*
++ * Clear previous low-memory mapping
++ */
++ for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
++ addr += PGDIR_SIZE)
++ pmd_clear(pmd_off_k(addr));
++
++ iotable_init(&map, 1);
++ }
++}
++
+ static void *
+ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
+ const void *caller)
+@@ -319,20 +408,173 @@ static void __dma_free_remap(void *cpu_a
+ arm_vmregion_free(&consistent_head, c);
+ }
+
++static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
++ void *data)
++{
++ struct page *page = virt_to_page(addr);
++ pgprot_t prot = *(pgprot_t *)data;
++
++ set_pte_ext(pte, mk_pte(page, prot), 0);
++ return 0;
++}
++
++static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
++{
++ unsigned long start = (unsigned long) page_address(page);
++ unsigned end = start + size;
++
++ apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
++ dsb();
++ flush_tlb_kernel_range(start, end);
++}
++
++static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
++ pgprot_t prot, struct page **ret_page,
++ const void *caller)
++{
++ struct page *page;
++ void *ptr;
++ page = __dma_alloc_buffer(dev, size, gfp);
++ if (!page)
++ return NULL;
++
++ ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
++ if (!ptr) {
++ __dma_free_buffer(page, size);
++ return NULL;
++ }
++
++ *ret_page = page;
++ return ptr;
++}
++
++static void *__alloc_from_pool(struct device *dev, size_t size,
++ struct page **ret_page, const void *caller)
++{
++ struct arm_vmregion *c;
++ size_t align;
++
++ if (!coherent_head.vm_start) {
++ printk(KERN_ERR "%s: coherent pool not initialised!\n",
++ __func__);
++ dump_stack();
++ return NULL;
++ }
++
++ /*
++ * Align the region allocation - allocations from pool are rather
++ * small, so align them to their order in pages, minimum is a page
++ * size. This helps reduce fragmentation of the DMA space.
++ */
++ align = PAGE_SIZE << get_order(size);
++ c = arm_vmregion_alloc(&coherent_head, align, size, 0, caller);
++ if (c) {
++ void *ptr = (void *)c->vm_start;
++ struct page *page = virt_to_page(ptr);
++ *ret_page = page;
++ return ptr;
++ }
++ return NULL;
++}
++
++static int __free_from_pool(void *cpu_addr, size_t size)
++{
++ unsigned long start = (unsigned long)cpu_addr;
++ unsigned long end = start + size;
++ struct arm_vmregion *c;
++
++ if (start < coherent_head.vm_start || end > coherent_head.vm_end)
++ return 0;
++
++ c = arm_vmregion_find_remove(&coherent_head, (unsigned long)start);
++
++ if ((c->vm_end - c->vm_start) != size) {
++ printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
++ __func__, c->vm_end - c->vm_start, size);
++ dump_stack();
++ size = c->vm_end - c->vm_start;
++ }
++
++ arm_vmregion_free(&coherent_head, c);
++ return 1;
++}
++
++static void *__alloc_from_contiguous(struct device *dev, size_t size,
++ pgprot_t prot, struct page **ret_page)
++{
++ unsigned long order = get_order(size);
++ size_t count = size >> PAGE_SHIFT;
++ struct page *page;
++
++ page = dma_alloc_from_contiguous(dev, count, order);
++ if (!page)
++ return NULL;
++
++ __dma_clear_buffer(page, size);
++ __dma_remap(page, size, prot);
++
++ *ret_page = page;
++ return page_address(page);
++}
++
++static void __free_from_contiguous(struct device *dev, struct page *page,
++ size_t size)
++{
++ __dma_remap(page, size, pgprot_kernel);
++ dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
++}
++
++#define nommu() 0
++
+ #else /* !CONFIG_MMU */
+
+-#define __dma_alloc_remap(page, size, gfp, prot, c) page_address(page)
+-#define __dma_free_remap(addr, size) do { } while (0)
++#define nommu() 1
++
++#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
++#define __alloc_from_pool(dev, size, ret_page, c) NULL
++#define __alloc_from_contiguous(dev, size, prot, ret) NULL
++#define __free_from_pool(cpu_addr, size) 0
++#define __free_from_contiguous(dev, page, size) do { } while (0)
++#define __dma_free_remap(cpu_addr, size) do { } while (0)
+
+ #endif /* CONFIG_MMU */
+
+-static void *
+-__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
+- pgprot_t prot, const void *caller)
++static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
++ struct page **ret_page)
++{
++ struct page *page;
++ page = __dma_alloc_buffer(dev, size, gfp);
++ if (!page)
++ return NULL;
++
++ *ret_page = page;
++ return page_address(page);
++}
++
++
++
++static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
++ gfp_t gfp, pgprot_t prot, const void *caller)
+ {
++ u64 mask = get_coherent_dma_mask(dev);
+ struct page *page;
+ void *addr;
+
++#ifdef CONFIG_DMA_API_DEBUG
++ u64 limit = (mask + 1) & ~mask;
++ if (limit && size >= limit) {
++ dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
++ size, mask);
++ return NULL;
++ }
++#endif
++
++ if (!mask)
++ return NULL;
++
++ if (mask < 0xffffffffULL)
++ gfp |= GFP_DMA;
++
+ /*
+ * Following is a work-around (a.k.a. hack) to prevent pages
+ * with __GFP_COMP being passed to split_page() which cannot
+@@ -345,19 +587,17 @@ __dma_alloc(struct device *dev, size_t s
+ *handle = ~0;
+ size = PAGE_ALIGN(size);
+
+- page = __dma_alloc_buffer(dev, size, gfp);
+- if (!page)
+- return NULL;
+-
+- if (!arch_is_coherent())
+- addr = __dma_alloc_remap(page, size, gfp, prot, caller);
++ if (arch_is_coherent() || nommu())
++ addr = __alloc_simple_buffer(dev, size, gfp, &page);
++ else if (cpu_architecture() < CPU_ARCH_ARMv6)
++ addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
++ else if (gfp & GFP_ATOMIC)
++ addr = __alloc_from_pool(dev, size, &page, caller);
+ else
+- addr = page_address(page);
++ addr = __alloc_from_contiguous(dev, size, prot, &page);
+
+ if (addr)
+ *handle = pfn_to_dma(dev, page_to_pfn(page));
+- else
+- __dma_free_buffer(page, size);
+
+ return addr;
+ }
+@@ -366,8 +606,8 @@ __dma_alloc(struct device *dev, size_t s
+ * Allocate DMA-coherent memory space and return both the kernel remapped
+ * virtual and bus address for that space.
+ */
+-void *
+-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
++void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle,
++ gfp_t gfp)
+ {
+ void *memory;
+
+@@ -398,25 +638,11 @@ static int dma_mmap(struct device *dev,
+ {
+ int ret = -ENXIO;
+ #ifdef CONFIG_MMU
+- unsigned long user_size, kern_size;
+- struct arm_vmregion *c;
+-
+- user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+-
+- c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
+- if (c) {
+- unsigned long off = vma->vm_pgoff;
+-
+- kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;
+-
+- if (off < kern_size &&
+- user_size <= (kern_size - off)) {
+- ret = remap_pfn_range(vma, vma->vm_start,
+- page_to_pfn(c->vm_pages) + off,
+- user_size << PAGE_SHIFT,
+- vma->vm_page_prot);
+- }
+- }
++ unsigned long pfn = dma_to_pfn(dev, dma_addr);
++ ret = remap_pfn_range(vma, vma->vm_start,
++ pfn + vma->vm_pgoff,
++ vma->vm_end - vma->vm_start,
++ vma->vm_page_prot);
+ #endif /* CONFIG_MMU */
+
+ return ret;
+@@ -438,23 +664,33 @@ int dma_mmap_writecombine(struct device
+ }
+ EXPORT_SYMBOL(dma_mmap_writecombine);
+
++
+ /*
+- * free a page as defined by the above mapping.
+- * Must not be called with IRQs disabled.
++ * Free a buffer as defined by the above mapping.
+ */
+ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
+ {
+- WARN_ON(irqs_disabled());
++ struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
+
+ if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+ return;
+
+ size = PAGE_ALIGN(size);
+
+- if (!arch_is_coherent())
++ if (arch_is_coherent() || nommu()) {
++ __dma_free_buffer(page, size);
++ } else if (cpu_architecture() < CPU_ARCH_ARMv6) {
+ __dma_free_remap(cpu_addr, size);
+-
+- __dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
++ __dma_free_buffer(page, size);
++ } else {
++ if (__free_from_pool(cpu_addr, size))
++ return;
++ /*
++ * Non-atomic allocations cannot be freed with IRQs disabled
++ */
++ WARN_ON(irqs_disabled());
++ __free_from_contiguous(dev, page, size);
++ }
+ }
+ EXPORT_SYMBOL(dma_free_coherent);
+
+--- a/arch/arm/mm/init.c
++++ b/arch/arm/mm/init.c
+@@ -20,6 +20,7 @@
+ #include <linux/highmem.h>
+ #include <linux/gfp.h>
+ #include <linux/memblock.h>
++#include <linux/dma-contiguous.h>
+
+ #include <asm/mach-types.h>
+ #include <asm/memblock.h>
+@@ -226,6 +227,17 @@ static void __init arm_adjust_dma_zone(u
+ }
+ #endif
+
++void __init setup_dma_zone(struct machine_desc *mdesc)
++{
++#ifdef CONFIG_ZONE_DMA
++ if (mdesc->dma_zone_size) {
++ arm_dma_zone_size = mdesc->dma_zone_size;
++ arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
++ } else
++ arm_dma_limit = 0xffffffff;
++#endif
++}
++
+ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
+ unsigned long max_high)
+ {
+@@ -273,12 +285,9 @@ static void __init arm_bootmem_free(unsi
+ * Adjust the sizes according to any special requirements for
+ * this machine type.
+ */
+- if (arm_dma_zone_size) {
++ if (arm_dma_zone_size)
+ arm_adjust_dma_zone(zone_size, zhole_size,
+ arm_dma_zone_size >> PAGE_SHIFT);
+- arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+- } else
+- arm_dma_limit = 0xffffffff;
+ #endif
+
+ free_area_init_node(0, zone_size, min, zhole_size);
+@@ -364,6 +373,12 @@ void __init arm_memblock_init(struct mem
+ if (mdesc->reserve)
+ mdesc->reserve();
+
++ /*
++ * reserve memory for DMA contigouos allocations,
++ * must come from DMA area inside low memory
++ */
++ dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
++
+ arm_memblock_steal_permitted = false;
+ memblock_allow_resize();
+ memblock_dump_all();
+--- a/arch/arm/mm/mm.h
++++ b/arch/arm/mm/mm.h
+@@ -70,5 +70,8 @@ extern u32 arm_dma_limit;
+ #define arm_dma_limit ((u32)~0)
+ #endif
+
++extern phys_addr_t arm_lowmem_limit;
++
+ void __init bootmem_init(void);
+ void arm_mm_memblock_reserve(void);
++void dma_contiguous_remap(void);
+--- a/arch/arm/mm/mmu.c
++++ b/arch/arm/mm/mmu.c
+@@ -288,6 +288,11 @@ static struct mem_type mem_types[] = {
+ PMD_SECT_UNCACHED | PMD_SECT_XN,
+ .domain = DOMAIN_KERNEL,
+ },
++ [MT_MEMORY_DMA_READY] = {
++ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
++ .prot_l1 = PMD_TYPE_TABLE,
++ .domain = DOMAIN_KERNEL,
++ },
+ };
+
+ const struct mem_type *get_mem_type(unsigned int type)
+@@ -429,6 +434,7 @@ static void __init build_mem_type_table(
+ if (arch_is_coherent() && cpu_is_xsc3()) {
+ mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
++ mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+ }
+@@ -460,6 +466,7 @@ static void __init build_mem_type_table(
+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
++ mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
+ mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
+ mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+ }
+@@ -512,6 +519,7 @@ static void __init build_mem_type_table(
+ mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
+ mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
+ mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
++ mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
+ mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
+ mem_types[MT_ROM].prot_sect |= cp->pmd;
+
+@@ -596,7 +604,7 @@ static void __init alloc_init_section(pu
+ * L1 entries, whereas PGDs refer to a group of L1 entries making
+ * up one logical pointer to an L2 table.
+ */
+- if (((addr | end | phys) & ~SECTION_MASK) == 0) {
++ if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
+ pmd_t *p = pmd;
+
+ #ifndef CONFIG_ARM_LPAE
+@@ -887,7 +895,7 @@ static int __init early_vmalloc(char *ar
+ }
+ early_param("vmalloc", early_vmalloc);
+
+-static phys_addr_t lowmem_limit __initdata = 0;
++phys_addr_t arm_lowmem_limit __initdata = 0;
+
+ void __init sanity_check_meminfo(void)
+ {
+@@ -970,8 +978,8 @@ void __init sanity_check_meminfo(void)
+ bank->size = newsize;
+ }
+ #endif
+- if (!bank->highmem && bank->start + bank->size > lowmem_limit)
+- lowmem_limit = bank->start + bank->size;
++ if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
++ arm_lowmem_limit = bank->start + bank->size;
+
+ j++;
+ }
+@@ -996,8 +1004,8 @@ void __init sanity_check_meminfo(void)
+ }
+ #endif
+ meminfo.nr_banks = j;
+- high_memory = __va(lowmem_limit - 1) + 1;
+- memblock_set_current_limit(lowmem_limit);
++ high_memory = __va(arm_lowmem_limit - 1) + 1;
++ memblock_set_current_limit(arm_lowmem_limit);
+ }
+
+ static inline void prepare_page_table(void)
+@@ -1022,8 +1030,8 @@ static inline void prepare_page_table(vo
+ * Find the end of the first block of lowmem.
+ */
+ end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
+- if (end >= lowmem_limit)
+- end = lowmem_limit;
++ if (end >= arm_lowmem_limit)
++ end = arm_lowmem_limit;
+
+ /*
+ * Clear out all the kernel space mappings, except for the first
+@@ -1167,8 +1175,8 @@ static void __init map_lowmem(void)
+ phys_addr_t end = start + reg->size;
+ struct map_desc map;
+
+- if (end > lowmem_limit)
+- end = lowmem_limit;
++ if (end > arm_lowmem_limit)
++ end = arm_lowmem_limit;
+ if (start >= end)
+ break;
+
+@@ -1189,11 +1197,12 @@ void __init paging_init(struct machine_d
+ {
+ void *zero_page;
+
+- memblock_set_current_limit(lowmem_limit);
++ memblock_set_current_limit(arm_lowmem_limit);
+
+ build_mem_type_table();
+ prepare_page_table();
+ map_lowmem();
++ dma_contiguous_remap();
+ devicemaps_init(mdesc);
+ kmap_init();
+
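The commit message above mentions that a board can give a device its own contiguous area with dma_declare_contiguous() during board initialisation; in this series that runs from the machine's .reserve callback, before arm_memblock_init() calls dma_contiguous_reserve() for the global area. A hedged sketch of such board code (my_board_reserve and my_videodec_pdev are invented names, the 16 MiB figure is arbitrary, and the four-argument prototype is assumed from this series' include/linux/dma-contiguous.h):

    #include <linux/dma-contiguous.h>
    #include <asm/sizes.h>

    static void __init my_board_reserve(void)
    {
            /* private 16 MiB CMA area for one device; base/limit of 0 let CMA pick the placement */
            if (dma_declare_contiguous(&my_videodec_pdev.dev, 16 * SZ_1M, 0, 0))
                    pr_warn("my_board: could not reserve a private CMA area\n");
    }

Devices without a private area of their own simply allocate from the global CMA region reserved by dma_contiguous_reserve().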
diff --git a/patches.dma-mapping/arm-mm-fix-dma-pool-affiliation-check.patch b/patches.dma-mapping/arm-mm-fix-dma-pool-affiliation-check.patch
new file mode 100644
index 0000000000000..2967591e0b23c
--- /dev/null
+++ b/patches.dma-mapping/arm-mm-fix-dma-pool-affiliation-check.patch
@@ -0,0 +1,68 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:54:08 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:51:13 +0900
+Subject: [PATCH v2 56/58] arm: mm: fix DMA pool affiliation check
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-57-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+
+The __free_from_pool() function was changed in
+e9da6e9905e639b0f842a244bc770b48ad0523e9. Unfortunately, the test that
+checks whether the provided (start,size) is within the DMA pool has
+been improperly modified. It used to be:
+
+ if (start < coherent_head.vm_start || end > coherent_head.vm_end)
+
+Where coherent_head.vm_end was non-inclusive (i.e., it did not include
+the first byte after the pool). The test has been changed to:
+
+ if (start < pool->vaddr || start > pool->vaddr + pool->size)
+
+So now pool->vaddr + pool->size is inclusive (i.e., it includes the
+first byte after the pool), so the test should be >= instead of >.
+
+This bug causes the following message when freeing the *first* DMA
+coherent buffer that has been allocated, because its virtual address
+is exactly equal to pool->vaddr + pool->size :
+
+WARNING: at /home/thomas/projets/linux-2.6/arch/arm/mm/dma-mapping.c:463 __free_from_pool+0xa4/0xc0()
+freeing wrong coherent size from pool
+
+Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Cc: Marek Szyprowski <m.szyprowski@samsung.com>
+Cc: Russell King <linux@arm.linux.org.uk>
+Cc: Lior Amsalem <alior@marvell.com>
+Cc: Maen Suleiman <maen@marvell.com>
+Cc: Tawfik Bayouk <tawfik@marvell.com>
+Cc: Shadi Ammouri <shadi@marvell.com>
+Cc: Eran Ben-Avi <benavi@marvell.com>
+Cc: Yehuda Yitschak <yehuday@marvell.com>
+Cc: Nadav Haklai <nadavh@marvell.com>
+[m.szyprowski: rebased onto v3.6-rc5 and resolved conflict]
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit f3d87524975f01b885fc3d009c6ab6afd0d00746)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+---
+ arch/arm/mm/dma-mapping.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 94b7b78..dc16881 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -477,7 +477,7 @@ static bool __in_atomic_pool(void *start, size_t size)
+ void *pool_start = pool->vaddr;
+ void *pool_end = pool->vaddr + pool->size;
+
+- if (start < pool_start || start > pool_end)
++ if (start < pool_start || start >= pool_end)
+ return false;
+
+ if (end <= pool_end)
+--
+1.7.5.4
+
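The check fixed above treats the atomic pool as a half-open interval [vaddr, vaddr + size): pool->vaddr + pool->size is the first byte past the pool, so a buffer that starts exactly there must be rejected, which is what >= does and > did not. A standalone illustration of the corrected bounds test (the names are local to this example, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Is [start, start + size) fully inside the half-open pool [pool_start, pool_end)? */
    static bool in_pool(const char *start, size_t size,
                        const char *pool_start, const char *pool_end)
    {
            if (start < pool_start || start >= pool_end)  /* '>' here was the bug */
                    return false;
            return start + size <= pool_end;
    }

    int main(void)
    {
            static char pool[4096];
            const char *end = pool + sizeof(pool);

            printf("%d\n", in_pool(pool, 16, pool, end));  /* 1: inside the pool */
            printf("%d\n", in_pool(end, 16, pool, end));   /* 0: starts at pool_end */
            return 0;
    }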
diff --git a/patches.dma-mapping/arm-mm-fix-mmu-mapping-of-cma-regions.patch b/patches.dma-mapping/arm-mm-fix-mmu-mapping-of-cma-regions.patch
new file mode 100644
index 0000000000000..0ff94d2a48dd2
--- /dev/null
+++ b/patches.dma-mapping/arm-mm-fix-mmu-mapping-of-cma-regions.patch
@@ -0,0 +1,45 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:23 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:55 +0900
+Subject: [PATCH v2 38/58] ARM: mm: fix MMU mapping of CMA regions
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-39-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Chris Brand <cbrand@broadcom.com>
+
+Fix dma_contiguous_remap() so that it continues through all the
+regions, even after encountering one that is outside lowmem.
+Without this change, if you have two CMA regions, the first outside
+lowmem and the second inside lowmem, only the second one will get
+set up in the MMU. Data written to that region then doesn't get
+automatically flushed from the cache into memory.
+
+Signed-off-by: Chris Brand <cbrand@broadcom.com>
+[extended patch subject with 'fix' word]
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit 39f78e70567a07a6fc0d7a4ca9e3331e44dd400d)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ arch/arm/mm/dma-mapping.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
+index 6f85d3d..cce3c0d 100644
+--- a/arch/arm/mm/dma-mapping.c
++++ b/arch/arm/mm/dma-mapping.c
+@@ -388,7 +388,7 @@ void __init dma_contiguous_remap(void)
+ if (end > arm_lowmem_limit)
+ end = arm_lowmem_limit;
+ if (start >= end)
+- return;
++ continue;
+
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+--
+1.7.5.4
+
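The one-word fix above matters because dma_contiguous_remap() walks every recorded CMA region: bailing out with return at the first region that falls outside lowmem also abandons all the regions that follow it, whereas continue only skips that one. A minimal standalone sketch of the control-flow difference (the region list and limit are invented for illustration):

    #include <stdio.h>

    struct region { unsigned long start, end; };

    int main(void)
    {
            /* invented layout: region 0 lies above the lowmem limit, region 1 below it */
            struct region regions[] = {
                    { 0xc0000000UL, 0xc8000000UL },
                    { 0x30000000UL, 0x31000000UL },
            };
            unsigned long lowmem_limit = 0x40000000UL;
            int i, mapped = 0;

            for (i = 0; i < 2; i++) {
                    if (regions[i].start >= lowmem_limit)
                            continue;  /* with 'return' here, region 1 would never be remapped */
                    mapped++;          /* stands in for iotable_init() on this region */
            }
            printf("regions remapped: %d\n", mapped);  /* 1 */
            return 0;
    }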
diff --git a/patches.dma-mapping/arm-mm-fix-type-of-the-arm_dma_limit-global-variable.patch b/patches.dma-mapping/arm-mm-fix-type-of-the-arm_dma_limit-global-variable.patch
new file mode 100644
index 0000000000000..889c58c48e764
--- /dev/null
+++ b/patches.dma-mapping/arm-mm-fix-type-of-the-arm_dma_limit-global-variable.patch
@@ -0,0 +1,50 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:07 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:49 +0900
+Subject: [PATCH v2 32/58] ARM: mm: fix type of the arm_dma_limit global variable
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-33-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+arm_dma_limit stores the physical address of the highest address accessible by DMA,
+so the phys_addr_t type makes much more sense for it instead of u32. This
+patch fixes the following build warning:
+
+arch/arm/mm/init.c:380: warning: comparison of distinct pointer types lacks a cast
+
+Reported-by: Russell King <linux@arm.linux.org.uk>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit 4986e5c7cd91817d0f58dd15073c9080d47980cf)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ arch/arm/mm/init.c | 2 +-
+ arch/arm/mm/mm.h | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/mm/init.c
++++ b/arch/arm/mm/init.c
+@@ -212,7 +212,7 @@ EXPORT_SYMBOL(arm_dma_zone_size);
+ * allocations. This must be the smallest DMA mask in the system,
+ * so a successful GFP_DMA allocation will always satisfy this.
+ */
+-u32 arm_dma_limit;
++phys_addr_t arm_dma_limit;
+
+ static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
+ unsigned long dma_size)
+--- a/arch/arm/mm/mm.h
++++ b/arch/arm/mm/mm.h
+@@ -65,7 +65,7 @@ extern void __flush_dcache_page(struct a
+ #endif
+
+ #ifdef CONFIG_ZONE_DMA
+-extern u32 arm_dma_limit;
++extern phys_addr_t arm_dma_limit;
+ #else
+ #define arm_dma_limit ((u32)~0)
+ #endif
diff --git a/patches.dma-mapping/arm-relax-conditions-required-for-enabling-contiguous-memory-allocator.patch b/patches.dma-mapping/arm-relax-conditions-required-for-enabling-contiguous-memory-allocator.patch
new file mode 100644
index 0000000000000..d77913903a76f
--- /dev/null
+++ b/patches.dma-mapping/arm-relax-conditions-required-for-enabling-contiguous-memory-allocator.patch
@@ -0,0 +1,44 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:25 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:56 +0900
+Subject: [PATCH v2 39/58] ARM: relax conditions required for enabling Contiguous Memory Allocator
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-40-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+Contiguous Memory Allocator requires only paging and the MMU to be enabled,
+not particular CPU architectures, so there is no need for a strict dependency
+on CPU type. This enables the use of CMA on some older ARM v5 systems which
+also might need large contiguous blocks for the multimedia processing hw
+modules.
+
+Reported-by: Prabhakar Lad <prabhakar.lad@ti.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Tested-by: Prabhakar Lad <prabhakar.lad@ti.com>
+(cherry picked from commit e092705bcd53de3bafc3053b0b55bf83e5d6711f)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ arch/arm/Kconfig | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
+index 5c0c50e..dce63cb 100644
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -5,7 +5,7 @@ config ARM
+ select HAVE_DMA_API_DEBUG
+ select HAVE_IDE if PCI || ISA || PCMCIA
+ select HAVE_DMA_ATTRS
+- select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
++ select HAVE_DMA_CONTIGUOUS if MMU
+ select HAVE_MEMBLOCK
+ select RTC_LIB
+ select SYS_SUPPORTS_APM_EMULATION
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/cma-fix-migration-mode.patch b/patches.dma-mapping/cma-fix-migration-mode.patch
new file mode 100644
index 0000000000000..620f5cb3a452f
--- /dev/null
+++ b/patches.dma-mapping/cma-fix-migration-mode.patch
@@ -0,0 +1,42 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:25 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:33 +0900
+Subject: [PATCH v2 16/58] cma: fix migration mode
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-17-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Minchan Kim <minchan.kim@gmail.com>
+
+__alloc_contig_migrate_range calls migrate_pages with the wrong argument
+for migrate_mode. Fix it.
+
+Cc: Marek Szyprowski <m.szyprowski@samsung.com>
+Signed-off-by: Minchan Kim <minchan@kernel.org>
+Acked-by: Michal Nazarewicz <mina86@mina86.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit 58f42fd54144346898e6dc6d6ae3acd4c591b42f)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ mm/page_alloc.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 22348ae..ed85c02 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5682,7 +5682,7 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
+
+ ret = migrate_pages(&cc.migratepages,
+ __alloc_contig_migrate_alloc,
+- 0, false, true);
++ 0, false, MIGRATE_SYNC);
+ }
+
+ putback_lru_pages(&cc.migratepages);
+--
+1.7.5.4
+
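The one-liner above matters because migrate_pages() takes an enum migrate_mode in this kernel, so the old boolean true still compiles but silently requests whichever mode happens to have the value 1 instead of fully synchronous migration. A standalone sketch of the pitfall (the enumerator ordering below is illustrative, matching the ordering the kernel used at the time):

    #include <stdbool.h>
    #include <stdio.h>

    /* illustrative ordering; the point is only that 'true' converts to 1 */
    enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };

    static void migrate(enum migrate_mode mode)
    {
            printf("requested mode %d (MIGRATE_SYNC is %d)\n", mode, MIGRATE_SYNC);
    }

    int main(void)
    {
            migrate(true);          /* compiles, but asks for mode 1, not MIGRATE_SYNC */
            migrate(MIGRATE_SYNC);  /* what the caller actually meant */
            return 0;
    }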
diff --git a/patches.dma-mapping/common-add-dma_mmap_from_coherent-function.patch b/patches.dma-mapping/common-add-dma_mmap_from_coherent-function.patch
new file mode 100644
index 0000000000000..5b754e4a4bf0a
--- /dev/null
+++ b/patches.dma-mapping/common-add-dma_mmap_from_coherent-function.patch
@@ -0,0 +1,115 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:31 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:35 +0900
+Subject: [PATCH v2 18/58] common: add dma_mmap_from_coherent() function
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-19-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+Add a common helper for dma-mapping core for mapping a coherent buffer
+to userspace.
+
+Reported-by: Subash Patel <subashrp@gmail.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
+Tested-By: Subash Patel <subash.ramaswamy@linaro.org>
+(cherry picked from commit bca0fa5f12a6744a2b2e53154af65a51402b3426)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ drivers/base/dma-coherent.c | 42 ++++++++++++++++++++++++++++++++++++
+ include/asm-generic/dma-coherent.h | 4 ++-
+ 2 files changed, 45 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
+index bb0025c..1b85949 100644
+--- a/drivers/base/dma-coherent.c
++++ b/drivers/base/dma-coherent.c
+@@ -10,6 +10,7 @@
+ struct dma_coherent_mem {
+ void *virt_base;
+ dma_addr_t device_base;
++ phys_addr_t pfn_base;
+ int size;
+ int flags;
+ unsigned long *bitmap;
+@@ -44,6 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+
+ dev->dma_mem->virt_base = mem_base;
+ dev->dma_mem->device_base = device_addr;
++ dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
+ dev->dma_mem->size = pages;
+ dev->dma_mem->flags = flags;
+
+@@ -176,3 +178,43 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
+ return 0;
+ }
+ EXPORT_SYMBOL(dma_release_from_coherent);
++
++/**
++ * dma_mmap_from_coherent() - try to mmap the memory allocated from
++ * per-device coherent memory pool to userspace
++ * @dev: device from which the memory was allocated
++ * @vma: vm_area for the userspace memory
++ * @vaddr: cpu address returned by dma_alloc_from_coherent
++ * @size: size of the memory buffer allocated by dma_alloc_from_coherent
++ *
++ * This checks whether the memory was allocated from the per-device
++ * coherent memory pool and if so, maps that memory to the provided vma.
++ *
++ * Returns 1 if we correctly mapped the memory, or 0 if
++ * dma_release_coherent() should proceed with mapping memory from
++ * generic pools.
++ */
++int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
++ void *vaddr, size_t size, int *ret)
++{
++ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
++
++ if (mem && vaddr >= mem->virt_base && vaddr + size <=
++ (mem->virt_base + (mem->size << PAGE_SHIFT))) {
++ unsigned long off = vma->vm_pgoff;
++ int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
++ int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++ int count = size >> PAGE_SHIFT;
++
++ *ret = -ENXIO;
++ if (off < count && user_count <= count - off) {
++ unsigned pfn = mem->pfn_base + start + off;
++ *ret = remap_pfn_range(vma, vma->vm_start, pfn,
++ user_count << PAGE_SHIFT,
++ vma->vm_page_prot);
++ }
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL(dma_mmap_from_coherent);
+diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h
+index 85a3ffa..abfb268 100644
+--- a/include/asm-generic/dma-coherent.h
++++ b/include/asm-generic/dma-coherent.h
+@@ -3,13 +3,15 @@
+
+ #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
+ /*
+- * These two functions are only for dma allocator.
++ * These three functions are only for dma allocator.
+ * Don't use them in device drivers.
+ */
+ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle, void **ret);
+ int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
+
++int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
++ void *cpu_addr, size_t size, int *ret);
+ /*
+ * Standard interface
+ */
+--
+1.7.5.4
+
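The helper added above is meant to be tried first by an architecture's mmap implementation: if the buffer came from a per-device coherent area declared with dma_declare_coherent_memory(), the helper maps it and the caller is done; otherwise the caller falls back to its normal remapping, exactly as the ARM dma_mmap() hunk earlier in this series does. A condensed sketch of that calling pattern (my_arch_dma_mmap is a made-up name and dma_to_pfn() is the ARM helper used in that hunk):

    static int my_arch_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                                void *cpu_addr, dma_addr_t dma_addr, size_t size)
    {
            int ret;

            /* per-device coherent pool? the helper fills in ret and maps the pages */
            if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                    return ret;

            /* otherwise map the ordinary (lowmem/CMA) buffer ourselves */
            return remap_pfn_range(vma, vma->vm_start,
                                   dma_to_pfn(dev, dma_addr) + vma->vm_pgoff,
                                   vma->vm_end - vma->vm_start, vma->vm_page_prot);
    }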
diff --git a/patches.dma-mapping/common-dma-mapping-add-support-for-generic-dma_mmap_-calls.patch b/patches.dma-mapping/common-dma-mapping-add-support-for-generic-dma_mmap_-calls.patch
new file mode 100644
index 0000000000000..dff6e4c3c7782
--- /dev/null
+++ b/patches.dma-mapping/common-dma-mapping-add-support-for-generic-dma_mmap_-calls.patch
@@ -0,0 +1,306 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:38 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:51:01 +0900
+Subject: [PATCH v2 44/58] common: dma-mapping: add support for generic dma_mmap_* calls
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-45-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+Commit 9adc5374 ('common: dma-mapping: introduce mmap method') added a
+generic method for implementing mmap user call to dma_map_ops structure.
+
+This patch converts ARM and PowerPC architectures (the only providers of
+dma_mmap_coherent/dma_mmap_writecombine calls) to use this generic
+dma_map_ops based call and adds a generic cross architecture
+definition for dma_mmap_attrs, dma_mmap_coherent, dma_mmap_writecombine
+functions.
+
+The generic mmap virt_to_page-based fallback implementation is provided for
+architectures which don't provide their own implementation for mmap method.
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Reviewed-by: Kyungmin Park <kyungmin.park@samsung.com>
+(cherry picked from commit 64ccc9c033c6089b2d426dad3c56477ab066c999)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+---
+ arch/arm/include/asm/dma-mapping.h | 19 ---------------
+ arch/powerpc/include/asm/dma-mapping.h | 8 +++---
+ arch/powerpc/kernel/dma-iommu.c | 1 +
+ arch/powerpc/kernel/dma-swiotlb.c | 1 +
+ arch/powerpc/kernel/dma.c | 36 +++++++++++++++-------------
+ arch/powerpc/kernel/vio.c | 1 +
+ drivers/base/dma-mapping.c | 31 +++++++++++++++++++++++++
+ include/asm-generic/dma-coherent.h | 1 +
+ include/asm-generic/dma-mapping-common.h | 37 ++++++++++++++++++++++++++++++
+ 9 files changed, 95 insertions(+), 40 deletions(-)
+
+diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
+index 80777d87..a048033 100644
+--- a/arch/arm/include/asm/dma-mapping.h
++++ b/arch/arm/include/asm/dma-mapping.h
+@@ -186,17 +186,6 @@ extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs);
+
+-#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+-
+-static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
+- void *cpu_addr, dma_addr_t dma_addr,
+- size_t size, struct dma_attrs *attrs)
+-{
+- struct dma_map_ops *ops = get_dma_ops(dev);
+- BUG_ON(!ops);
+- return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+-}
+-
+ static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+ {
+@@ -213,14 +202,6 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
+ return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+ }
+
+-static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
+- void *cpu_addr, dma_addr_t dma_addr, size_t size)
+-{
+- DEFINE_DMA_ATTRS(attrs);
+- dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
+- return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
+-}
+-
+ /*
+ * This can be called during boot to increase the size of the consistent
+ * DMA region above it's default value of 2MB. It must be called before the
+diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
+index 62678e3..7816087 100644
+--- a/arch/powerpc/include/asm/dma-mapping.h
++++ b/arch/powerpc/include/asm/dma-mapping.h
+@@ -27,7 +27,10 @@ extern void *dma_direct_alloc_coherent(struct device *dev, size_t size,
+ extern void dma_direct_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs);
+-
++extern int dma_direct_mmap_coherent(struct device *dev,
++ struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t handle,
++ size_t size, struct dma_attrs *attrs);
+
+ #ifdef CONFIG_NOT_COHERENT_CACHE
+ /*
+@@ -207,11 +210,8 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+ #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+ #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+-extern int dma_mmap_coherent(struct device *, struct vm_area_struct *,
+- void *, dma_addr_t, size_t);
+ #define ARCH_HAS_DMA_MMAP_COHERENT
+
+-
+ static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+ {
+diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
+index bcfdcd2..2d7bb8c 100644
+--- a/arch/powerpc/kernel/dma-iommu.c
++++ b/arch/powerpc/kernel/dma-iommu.c
+@@ -109,6 +109,7 @@ static u64 dma_iommu_get_required_mask(struct device *dev)
+ struct dma_map_ops dma_iommu_ops = {
+ .alloc = dma_iommu_alloc_coherent,
+ .free = dma_iommu_free_coherent,
++ .mmap = dma_direct_mmap_coherent,
+ .map_sg = dma_iommu_map_sg,
+ .unmap_sg = dma_iommu_unmap_sg,
+ .dma_supported = dma_iommu_dma_supported,
+diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
+index 4ab88da..4694365 100644
+--- a/arch/powerpc/kernel/dma-swiotlb.c
++++ b/arch/powerpc/kernel/dma-swiotlb.c
+@@ -49,6 +49,7 @@ static u64 swiotlb_powerpc_get_required(struct device *dev)
+ struct dma_map_ops swiotlb_dma_ops = {
+ .alloc = dma_direct_alloc_coherent,
+ .free = dma_direct_free_coherent,
++ .mmap = dma_direct_mmap_coherent,
+ .map_sg = swiotlb_map_sg_attrs,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .dma_supported = swiotlb_dma_supported,
+diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
+index b1ec983..062bf20 100644
+--- a/arch/powerpc/kernel/dma.c
++++ b/arch/powerpc/kernel/dma.c
+@@ -65,6 +65,24 @@ void dma_direct_free_coherent(struct device *dev, size_t size,
+ #endif
+ }
+
++int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t handle, size_t size,
++ struct dma_attrs *attrs)
++{
++ unsigned long pfn;
++
++#ifdef CONFIG_NOT_COHERENT_CACHE
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++ pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
++#else
++ pfn = page_to_pfn(virt_to_page(cpu_addr));
++#endif
++ return remap_pfn_range(vma, vma->vm_start,
++ pfn + vma->vm_pgoff,
++ vma->vm_end - vma->vm_start,
++ vma->vm_page_prot);
++}
++
+ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
+ int nents, enum dma_data_direction direction,
+ struct dma_attrs *attrs)
+@@ -154,6 +172,7 @@ static inline void dma_direct_sync_single(struct device *dev,
+ struct dma_map_ops dma_direct_ops = {
+ .alloc = dma_direct_alloc_coherent,
+ .free = dma_direct_free_coherent,
++ .mmap = dma_direct_mmap_coherent,
+ .map_sg = dma_direct_map_sg,
+ .unmap_sg = dma_direct_unmap_sg,
+ .dma_supported = dma_direct_dma_supported,
+@@ -211,20 +230,3 @@ static int __init dma_init(void)
+ }
+ fs_initcall(dma_init);
+
+-int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+- void *cpu_addr, dma_addr_t handle, size_t size)
+-{
+- unsigned long pfn;
+-
+-#ifdef CONFIG_NOT_COHERENT_CACHE
+- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+- pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
+-#else
+- pfn = page_to_pfn(virt_to_page(cpu_addr));
+-#endif
+- return remap_pfn_range(vma, vma->vm_start,
+- pfn + vma->vm_pgoff,
+- vma->vm_end - vma->vm_start,
+- vma->vm_page_prot);
+-}
+-EXPORT_SYMBOL_GPL(dma_mmap_coherent);
+diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
+index ea6081a..544a03a 100644
+--- a/arch/powerpc/kernel/vio.c
++++ b/arch/powerpc/kernel/vio.c
+@@ -611,6 +611,7 @@ static u64 vio_dma_get_required_mask(struct device *dev)
+ struct dma_map_ops vio_dma_mapping_ops = {
+ .alloc = vio_dma_iommu_alloc_coherent,
+ .free = vio_dma_iommu_free_coherent,
++ .mmap = dma_direct_mmap_coherent,
+ .map_sg = vio_dma_iommu_map_sg,
+ .unmap_sg = vio_dma_iommu_unmap_sg,
+ .map_page = vio_dma_iommu_map_page,
+diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
+index 6f3676f..db5db02 100644
+--- a/drivers/base/dma-mapping.c
++++ b/drivers/base/dma-mapping.c
+@@ -10,6 +10,7 @@
+ #include <linux/dma-mapping.h>
+ #include <linux/export.h>
+ #include <linux/gfp.h>
++#include <asm-generic/dma-coherent.h>
+
+ /*
+ * Managed DMA API
+@@ -218,3 +219,33 @@ void dmam_release_declared_memory(struct device *dev)
+ EXPORT_SYMBOL(dmam_release_declared_memory);
+
+ #endif
++
++/*
++ * Create userspace mapping for the DMA-coherent memory.
++ */
++int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t dma_addr, size_t size)
++{
++ int ret = -ENXIO;
++#ifdef CONFIG_MMU
++ unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++ unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
++ unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
++ unsigned long off = vma->vm_pgoff;
++
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++
++ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
++ return ret;
++
++ if (off < count && user_count <= (count - off)) {
++ ret = remap_pfn_range(vma, vma->vm_start,
++ pfn + off,
++ user_count << PAGE_SHIFT,
++ vma->vm_page_prot);
++ }
++#endif /* CONFIG_MMU */
++
++ return ret;
++}
++EXPORT_SYMBOL(dma_common_mmap);
+diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h
+index abfb268..2be8a2d 100644
+--- a/include/asm-generic/dma-coherent.h
++++ b/include/asm-generic/dma-coherent.h
+@@ -29,6 +29,7 @@ dma_mark_declared_memory_occupied(struct device *dev,
+ #else
+ #define dma_alloc_from_coherent(dev, size, handle, ret) (0)
+ #define dma_release_from_coherent(dev, order, vaddr) (0)
++#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
+ #endif
+
+ #endif
+diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
+index 2e248d8..9073aeb 100644
+--- a/include/asm-generic/dma-mapping-common.h
++++ b/include/asm-generic/dma-mapping-common.h
+@@ -176,4 +176,41 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+ #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
++extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t dma_addr, size_t size);
++
++/**
++ * dma_mmap_attrs - map a coherent DMA allocation into user space
++ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
++ * @vma: vm_area_struct describing requested user mapping
++ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
++ * @handle: device-view address returned from dma_alloc_attrs
++ * @size: size of memory originally requested in dma_alloc_attrs
++ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
++ *
++ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
++ * into user space. The coherent DMA buffer must not be freed by the
++ * driver until the user space mapping has been released.
++ */
++static inline int
++dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
++ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
++{
++ struct dma_map_ops *ops = get_dma_ops(dev);
++ BUG_ON(!ops);
++ if (ops->mmap)
++ return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
++ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
++}
++
++#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
++
++static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
++ void *cpu_addr, dma_addr_t dma_addr, size_t size)
++{
++ DEFINE_DMA_ATTRS(attrs);
++ dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
++ return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
++}
++
+ #endif
+--
+1.7.5.4
+
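For reference, the consolidated interface above is what a driver's mmap file operation ends up calling. A minimal sketch of such a caller follows; the my_dev structure, its fields and the fops wiring are hypothetical and only illustrate the calling convention, not any driver in this series.

/*
 * Illustrative sketch only (hypothetical my_dev structure): exporting a
 * coherent DMA buffer to user space via dma_mmap_coherent().
 */
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/mm.h>

struct my_dev {
	struct device *dev;	/* device the buffer was allocated for */
	void *cpu_addr;		/* kernel address from dma_alloc_coherent() */
	dma_addr_t dma_handle;	/* bus address from dma_alloc_coherent() */
	size_t size;		/* size passed to dma_alloc_coherent() */
};

static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *md = file->private_data;

	/* Dispatches to the dma_map_ops ->mmap method when the architecture
	 * provides one, and falls back to dma_common_mmap() otherwise. */
	return dma_mmap_coherent(md->dev, vma, md->cpu_addr,
				 md->dma_handle, md->size);
}

static const struct file_operations my_dev_fops = {
	.owner	= THIS_MODULE,
	.mmap	= my_dev_mmap,
};

A write-combined mapping would be requested the same way through dma_mmap_writecombine(), which sets DMA_ATTR_WRITE_COMBINE before dispatching.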
diff --git a/patches.dma-mapping/driver-core-fix-some-kernel-doc-warnings-in-dma-.c.patch b/patches.dma-mapping/driver-core-fix-some-kernel-doc-warnings-in-dma-.c.patch
new file mode 100644
index 0000000000000..686b432e0d363
--- /dev/null
+++ b/patches.dma-mapping/driver-core-fix-some-kernel-doc-warnings-in-dma-.c.patch
@@ -0,0 +1,47 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:45 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:51:04 +0900
+Subject: [PATCH v2 47/58] driver core: fix some kernel-doc warnings in dma*.c
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-48-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Randy Dunlap <rdunlap@xenotime.net>
+
+Fix kernel-doc warnings in drivers/base/dma*.c:
+
+Warning(drivers/base/dma-buf.c:498): No description found for parameter 'vaddr'
+Warning(drivers/base/dma-coherent.c:199): No description found for parameter 'ret'
+
+Signed-off-by: Randy Dunlap <rdunlap@xenotime.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+(cherry picked from commit 6e7b4a59b3d7bb2dcd11c019354bf0c91037dadd)
+
+Conflicts:
+
+ drivers/base/dma-buf.c
+
+The backported patch only addresses the dma-coherent.c warning, as the
+dma-buf.c warning is not present in the 3.4 kernel.
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+---
+ drivers/base/dma-coherent.c | 1 +
+ 1 files changed, 1 insertions(+), 0 deletions(-)
+
+diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
+index 1b85949..560a717 100644
+--- a/drivers/base/dma-coherent.c
++++ b/drivers/base/dma-coherent.c
+@@ -186,6 +186,7 @@ EXPORT_SYMBOL(dma_release_from_coherent);
+ * @vma: vm_area for the userspace memory
+ * @vaddr: cpu address returned by dma_alloc_from_coherent
+ * @size: size of the memory buffer allocated by dma_alloc_from_coherent
++ * @ret: result from remap_pfn_range()
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/drivers-add-contiguous-memory-allocator.patch b/patches.dma-mapping/drivers-add-contiguous-memory-allocator.patch
new file mode 100644
index 0000000000000..4061998d81e64
--- /dev/null
+++ b/patches.dma-mapping/drivers-add-contiguous-memory-allocator.patch
@@ -0,0 +1,745 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:19 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:30 +0900
+Subject: [PATCH v2 13/58] drivers: add Contiguous Memory Allocator
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-14-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+The Contiguous Memory Allocator is a set of helper functions for DMA
+mapping framework that improves allocations of contiguous memory chunks.
+
+CMA grabs memory on system boot, marks it with the MIGRATE_CMA migrate
+type and gives it back to the system. The kernel is allowed to allocate
+only movable pages within CMA's managed memory, so that it can be used,
+for example, for the page cache when the DMA mapping framework does not
+need it. On a dma_alloc_from_contiguous() request, such pages are
+migrated out of the CMA area to free the required contiguous block and
+fulfill the request. This makes it possible to allocate large contiguous
+chunks of memory at any time, provided enough free memory is available.
+
+This code is heavily based on earlier works by Michal Nazarewicz.
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
+Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
+Acked-by: Arnd Bergmann <arnd@arndb.de>
+Tested-by: Rob Clark <rob.clark@linaro.org>
+Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
+Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+Tested-by: Robert Nelson <robertcnelson@gmail.com>
+Tested-by: Barry Song <Baohua.Song@csr.com>
+(cherry picked from commit c64be2bb1c6eb43c838b2c6d57b074078be208dd)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ Documentation/kernel-parameters.txt | 5
+ arch/Kconfig | 3
+ drivers/base/Kconfig | 89 +++++++
+ drivers/base/Makefile | 1
+ drivers/base/dma-contiguous.c | 401 +++++++++++++++++++++++++++++++++++
+ include/asm-generic/dma-contiguous.h | 28 ++
+ include/linux/device.h | 4
+ include/linux/dma-contiguous.h | 110 +++++++++
+ 8 files changed, 641 insertions(+)
+ create mode 100644 drivers/base/dma-contiguous.c
+ create mode 100644 include/asm-generic/dma-contiguous.h
+ create mode 100644 include/linux/dma-contiguous.h
+
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -508,6 +508,11 @@ bytes respectively. Such letter suffixes
+ Also note the kernel might malfunction if you disable
+ some critical bits.
+
++ cma=nn[MG] [ARM,KNL]
++ Sets the size of kernel global memory area for contiguous
++ memory allocations. For more information, see
++ include/linux/dma-contiguous.h
++
+ cmo_free_hint= [PPC] Format: { yes | no }
+ Specify whether pages are marked as being inactive
+ when they are freed. This is used in CMO environments
+--- a/arch/Kconfig
++++ b/arch/Kconfig
+@@ -145,6 +145,9 @@ config HAVE_ARCH_TRACEHOOK
+ config HAVE_DMA_ATTRS
+ bool
+
++config HAVE_DMA_CONTIGUOUS
++ bool
++
+ config USE_GENERIC_SMP_HELPERS
+ bool
+
+--- a/drivers/base/Kconfig
++++ b/drivers/base/Kconfig
+@@ -192,4 +192,93 @@ config DMA_SHARED_BUFFER
+ APIs extension; the file's descriptor can then be passed on to other
+ driver.
+
++config CMA
++ bool "Contiguous Memory Allocator (EXPERIMENTAL)"
++ depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL
++ select MIGRATION
++ help
++ This enables the Contiguous Memory Allocator which allows drivers
++ to allocate big physically-contiguous blocks of memory for use with
++ hardware components that do not support I/O map nor scatter-gather.
++
++ For more information see <include/linux/dma-contiguous.h>.
++ If unsure, say "n".
++
++if CMA
++
++config CMA_DEBUG
++ bool "CMA debug messages (DEVELOPMENT)"
++ depends on DEBUG_KERNEL
++ help
++ Turns on debug messages in CMA. This produces KERN_DEBUG
++ messages for every CMA call as well as various messages while
++ processing calls such as dma_alloc_from_contiguous().
++ This option does not affect warning and error messages.
++
++comment "Default contiguous memory area size:"
++
++config CMA_SIZE_MBYTES
++ int "Size in Mega Bytes"
++ depends on !CMA_SIZE_SEL_PERCENTAGE
++ default 16
++ help
++ Defines the size (in MiB) of the default memory area for Contiguous
++ Memory Allocator.
++
++config CMA_SIZE_PERCENTAGE
++ int "Percentage of total memory"
++ depends on !CMA_SIZE_SEL_MBYTES
++ default 10
++ help
++ Defines the size of the default memory area for Contiguous Memory
++ Allocator as a percentage of the total memory in the system.
++
++choice
++ prompt "Selected region size"
++ default CMA_SIZE_SEL_ABSOLUTE
++
++config CMA_SIZE_SEL_MBYTES
++ bool "Use mega bytes value only"
++
++config CMA_SIZE_SEL_PERCENTAGE
++ bool "Use percentage value only"
++
++config CMA_SIZE_SEL_MIN
++ bool "Use lower value (minimum)"
++
++config CMA_SIZE_SEL_MAX
++ bool "Use higher value (maximum)"
++
++endchoice
++
++config CMA_ALIGNMENT
++ int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
++ range 4 9
++ default 8
++ help
++ DMA mapping framework by default aligns all buffers to the smallest
++ PAGE_SIZE order which is greater than or equal to the requested buffer
++ size. This works well for buffers up to a few hundreds kilobytes, but
++ for larger buffers it just a memory waste. With this parameter you can
++ specify the maximum PAGE_SIZE order for contiguous buffers. Larger
++ buffers will be aligned only to this specified order. The order is
++ expressed as a power of two multiplied by the PAGE_SIZE.
++
++ For example, if your system defaults to 4KiB pages, the order value
++ of 8 means that the buffers will be aligned up to 1MiB only.
++
++ If unsure, leave the default value "8".
++
++config CMA_AREAS
++ int "Maximum count of the CMA device-private areas"
++ default 7
++ help
++ CMA allows to create CMA areas for particular devices. This parameter
++ sets the maximum number of such device private CMA areas in the
++ system.
++
++ If unsure, leave the default value "7".
++
++endif
++
+ endmenu
+--- a/drivers/base/Makefile
++++ b/drivers/base/Makefile
+@@ -6,6 +6,7 @@ obj-y := core.o bus.o dd.o syscore.o \
+ attribute_container.o transport_class.o \
+ topology.o
+ obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
++obj-$(CONFIG_CMA) += dma-contiguous.o
+ obj-y += power/
+ obj-$(CONFIG_HAS_DMA) += dma-mapping.o
+ obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
+--- /dev/null
++++ b/drivers/base/dma-contiguous.c
+@@ -0,0 +1,401 @@
++/*
++ * Contiguous Memory Allocator for DMA mapping framework
++ * Copyright (c) 2010-2011 by Samsung Electronics.
++ * Written by:
++ * Marek Szyprowski <m.szyprowski@samsung.com>
++ * Michal Nazarewicz <mina86@mina86.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License or (at your optional) any later version of the license.
++ */
++
++#define pr_fmt(fmt) "cma: " fmt
++
++#ifdef CONFIG_CMA_DEBUG
++#ifndef DEBUG
++# define DEBUG
++#endif
++#endif
++
++#include <asm/page.h>
++#include <asm/dma-contiguous.h>
++
++#include <linux/memblock.h>
++#include <linux/err.h>
++#include <linux/mm.h>
++#include <linux/mutex.h>
++#include <linux/page-isolation.h>
++#include <linux/slab.h>
++#include <linux/swap.h>
++#include <linux/mm_types.h>
++#include <linux/dma-contiguous.h>
++
++#ifndef SZ_1M
++#define SZ_1M (1 << 20)
++#endif
++
++struct cma {
++ unsigned long base_pfn;
++ unsigned long count;
++ unsigned long *bitmap;
++};
++
++struct cma *dma_contiguous_default_area;
++
++#ifdef CONFIG_CMA_SIZE_MBYTES
++#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
++#else
++#define CMA_SIZE_MBYTES 0
++#endif
++
++/*
++ * Default global CMA area size can be defined in kernel's .config.
++ * This is usefull mainly for distro maintainers to create a kernel
++ * that works correctly for most supported systems.
++ * The size can be set in bytes or as a percentage of the total memory
++ * in the system.
++ *
++ * Users, who want to set the size of global CMA area for their system
++ * should use cma= kernel parameter.
++ */
++static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;
++static long size_cmdline = -1;
++
++static int __init early_cma(char *p)
++{
++ pr_debug("%s(%s)\n", __func__, p);
++ size_cmdline = memparse(p, &p);
++ return 0;
++}
++early_param("cma", early_cma);
++
++#ifdef CONFIG_CMA_SIZE_PERCENTAGE
++
++static unsigned long __init __maybe_unused cma_early_percent_memory(void)
++{
++ struct memblock_region *reg;
++ unsigned long total_pages = 0;
++
++ /*
++ * We cannot use memblock_phys_mem_size() here, because
++ * memblock_analyze() has not been called yet.
++ */
++ for_each_memblock(memory, reg)
++ total_pages += memblock_region_memory_end_pfn(reg) -
++ memblock_region_memory_base_pfn(reg);
++
++ return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
++}
++
++#else
++
++static inline __maybe_unused unsigned long cma_early_percent_memory(void)
++{
++ return 0;
++}
++
++#endif
++
++/**
++ * dma_contiguous_reserve() - reserve area for contiguous memory handling
++ * @limit: End address of the reserved memory (optional, 0 for any).
++ *
++ * This function reserves memory from early allocator. It should be
++ * called by arch specific code once the early allocator (memblock or bootmem)
++ * has been activated and all other subsystems have already allocated/reserved
++ * memory.
++ */
++void __init dma_contiguous_reserve(phys_addr_t limit)
++{
++ unsigned long selected_size = 0;
++
++ pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
++
++ if (size_cmdline != -1) {
++ selected_size = size_cmdline;
++ } else {
++#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
++ selected_size = size_bytes;
++#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
++ selected_size = cma_early_percent_memory();
++#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
++ selected_size = min(size_bytes, cma_early_percent_memory());
++#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
++ selected_size = max(size_bytes, cma_early_percent_memory());
++#endif
++ }
++
++ if (selected_size) {
++ pr_debug("%s: reserving %ld MiB for global area\n", __func__,
++ selected_size / SZ_1M);
++
++ dma_declare_contiguous(NULL, selected_size, 0, limit);
++ }
++};
++
++static DEFINE_MUTEX(cma_mutex);
++
++static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
++{
++ unsigned long pfn = base_pfn;
++ unsigned i = count >> pageblock_order;
++ struct zone *zone;
++
++ WARN_ON_ONCE(!pfn_valid(pfn));
++ zone = page_zone(pfn_to_page(pfn));
++
++ do {
++ unsigned j;
++ base_pfn = pfn;
++ for (j = pageblock_nr_pages; j; --j, pfn++) {
++ WARN_ON_ONCE(!pfn_valid(pfn));
++ if (page_zone(pfn_to_page(pfn)) != zone)
++ return -EINVAL;
++ }
++ init_cma_reserved_pageblock(pfn_to_page(base_pfn));
++ } while (--i);
++ return 0;
++}
++
++static __init struct cma *cma_create_area(unsigned long base_pfn,
++ unsigned long count)
++{
++ int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
++ struct cma *cma;
++ int ret = -ENOMEM;
++
++ pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
++
++ cma = kmalloc(sizeof *cma, GFP_KERNEL);
++ if (!cma)
++ return ERR_PTR(-ENOMEM);
++
++ cma->base_pfn = base_pfn;
++ cma->count = count;
++ cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
++
++ if (!cma->bitmap)
++ goto no_mem;
++
++ ret = cma_activate_area(base_pfn, count);
++ if (ret)
++ goto error;
++
++ pr_debug("%s: returned %p\n", __func__, (void *)cma);
++ return cma;
++
++error:
++ kfree(cma->bitmap);
++no_mem:
++ kfree(cma);
++ return ERR_PTR(ret);
++}
++
++static struct cma_reserved {
++ phys_addr_t start;
++ unsigned long size;
++ struct device *dev;
++} cma_reserved[MAX_CMA_AREAS] __initdata;
++static unsigned cma_reserved_count __initdata;
++
++static int __init cma_init_reserved_areas(void)
++{
++ struct cma_reserved *r = cma_reserved;
++ unsigned i = cma_reserved_count;
++
++ pr_debug("%s()\n", __func__);
++
++ for (; i; --i, ++r) {
++ struct cma *cma;
++ cma = cma_create_area(PFN_DOWN(r->start),
++ r->size >> PAGE_SHIFT);
++ if (!IS_ERR(cma))
++ dev_set_cma_area(r->dev, cma);
++ }
++ return 0;
++}
++core_initcall(cma_init_reserved_areas);
++
++/**
++ * dma_declare_contiguous() - reserve area for contiguous memory handling
++ * for particular device
++ * @dev: Pointer to device structure.
++ * @size: Size of the reserved memory.
++ * @base: Start address of the reserved memory (optional, 0 for any).
++ * @limit: End address of the reserved memory (optional, 0 for any).
++ *
++ * This function reserves memory for specified device. It should be
++ * called by board specific code when early allocator (memblock or bootmem)
++ * is still activate.
++ */
++int __init dma_declare_contiguous(struct device *dev, unsigned long size,
++ phys_addr_t base, phys_addr_t limit)
++{
++ struct cma_reserved *r = &cma_reserved[cma_reserved_count];
++ unsigned long alignment;
++
++ pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
++ (unsigned long)size, (unsigned long)base,
++ (unsigned long)limit);
++
++ /* Sanity checks */
++ if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
++ pr_err("Not enough slots for CMA reserved regions!\n");
++ return -ENOSPC;
++ }
++
++ if (!size)
++ return -EINVAL;
++
++ /* Sanitise input arguments */
++ alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
++ base = ALIGN(base, alignment);
++ size = ALIGN(size, alignment);
++ limit &= ~(alignment - 1);
++
++ /* Reserve memory */
++ if (base) {
++ if (memblock_is_region_reserved(base, size) ||
++ memblock_reserve(base, size) < 0) {
++ base = -EBUSY;
++ goto err;
++ }
++ } else {
++ /*
++ * Use __memblock_alloc_base() since
++ * memblock_alloc_base() panic()s.
++ */
++ phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
++ if (!addr) {
++ base = -ENOMEM;
++ goto err;
++ } else if (addr + size > ~(unsigned long)0) {
++ memblock_free(addr, size);
++ base = -EINVAL;
++ goto err;
++ } else {
++ base = addr;
++ }
++ }
++
++ /*
++ * Each reserved area must be initialised later, when more kernel
++ * subsystems (like slab allocator) are available.
++ */
++ r->start = base;
++ r->size = size;
++ r->dev = dev;
++ cma_reserved_count++;
++ pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
++ (unsigned long)base);
++
++ /* Architecture specific contiguous memory fixup. */
++ dma_contiguous_early_fixup(base, size);
++ return 0;
++err:
++ pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
++ return base;
++}
++
++/**
++ * dma_alloc_from_contiguous() - allocate pages from contiguous area
++ * @dev: Pointer to device for which the allocation is performed.
++ * @count: Requested number of pages.
++ * @align: Requested alignment of pages (in PAGE_SIZE order).
++ *
++ * This function allocates memory buffer for specified device. It uses
++ * device specific contiguous memory area if available or the default
++ * global one. Requires architecture specific get_dev_cma_area() helper
++ * function.
++ */
++struct page *dma_alloc_from_contiguous(struct device *dev, int count,
++ unsigned int align)
++{
++ unsigned long mask, pfn, pageno, start = 0;
++ struct cma *cma = dev_get_cma_area(dev);
++ int ret;
++
++ if (!cma || !cma->count)
++ return NULL;
++
++ if (align > CONFIG_CMA_ALIGNMENT)
++ align = CONFIG_CMA_ALIGNMENT;
++
++ pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
++ count, align);
++
++ if (!count)
++ return NULL;
++
++ mask = (1 << align) - 1;
++
++ mutex_lock(&cma_mutex);
++
++ for (;;) {
++ pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
++ start, count, mask);
++ if (pageno >= cma->count) {
++ ret = -ENOMEM;
++ goto error;
++ }
++
++ pfn = cma->base_pfn + pageno;
++ ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
++ if (ret == 0) {
++ bitmap_set(cma->bitmap, pageno, count);
++ break;
++ } else if (ret != -EBUSY) {
++ goto error;
++ }
++ pr_debug("%s(): memory range at %p is busy, retrying\n",
++ __func__, pfn_to_page(pfn));
++ /* try again with a bit different memory target */
++ start = pageno + mask + 1;
++ }
++
++ mutex_unlock(&cma_mutex);
++
++ pr_debug("%s(): returned %p\n", __func__, pfn_to_page(pfn));
++ return pfn_to_page(pfn);
++error:
++ mutex_unlock(&cma_mutex);
++ return NULL;
++}
++
++/**
++ * dma_release_from_contiguous() - release allocated pages
++ * @dev: Pointer to device for which the pages were allocated.
++ * @pages: Allocated pages.
++ * @count: Number of allocated pages.
++ *
++ * This function releases memory allocated by dma_alloc_from_contiguous().
++ * It returns false when provided pages do not belong to contiguous area and
++ * true otherwise.
++ */
++bool dma_release_from_contiguous(struct device *dev, struct page *pages,
++ int count)
++{
++ struct cma *cma = dev_get_cma_area(dev);
++ unsigned long pfn;
++
++ if (!cma || !pages)
++ return false;
++
++ pr_debug("%s(page %p)\n", __func__, (void *)pages);
++
++ pfn = page_to_pfn(pages);
++
++ if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
++ return false;
++
++ VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
++
++ mutex_lock(&cma_mutex);
++ bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
++ free_contig_range(pfn, count);
++ mutex_unlock(&cma_mutex);
++
++ return true;
++}
+--- /dev/null
++++ b/include/asm-generic/dma-contiguous.h
+@@ -0,0 +1,28 @@
++#ifndef ASM_DMA_CONTIGUOUS_H
++#define ASM_DMA_CONTIGUOUS_H
++
++#ifdef __KERNEL__
++#ifdef CONFIG_CMA
++
++#include <linux/device.h>
++#include <linux/dma-contiguous.h>
++
++static inline struct cma *dev_get_cma_area(struct device *dev)
++{
++ if (dev && dev->cma_area)
++ return dev->cma_area;
++ return dma_contiguous_default_area;
++}
++
++static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
++{
++ if (dev)
++ dev->cma_area = cma;
++ if (!dev || !dma_contiguous_default_area)
++ dma_contiguous_default_area = cma;
++}
++
++#endif
++#endif
++
++#endif
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -661,6 +661,10 @@ struct device {
+
+ struct dma_coherent_mem *dma_mem; /* internal for coherent mem
+ override */
++#ifdef CONFIG_CMA
++ struct cma *cma_area; /* contiguous memory area for dma
++ allocations */
++#endif
+ /* arch specific additions */
+ struct dev_archdata archdata;
+
+--- /dev/null
++++ b/include/linux/dma-contiguous.h
+@@ -0,0 +1,110 @@
++#ifndef __LINUX_CMA_H
++#define __LINUX_CMA_H
++
++/*
++ * Contiguous Memory Allocator for DMA mapping framework
++ * Copyright (c) 2010-2011 by Samsung Electronics.
++ * Written by:
++ * Marek Szyprowski <m.szyprowski@samsung.com>
++ * Michal Nazarewicz <mina86@mina86.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License or (at your optional) any later version of the license.
++ */
++
++/*
++ * Contiguous Memory Allocator
++ *
++ * The Contiguous Memory Allocator (CMA) makes it possible to
++ * allocate big contiguous chunks of memory after the system has
++ * booted.
++ *
++ * Why is it needed?
++ *
++ * Various devices on embedded systems have no scatter-getter and/or
++ * IO map support and require contiguous blocks of memory to
++ * operate. They include devices such as cameras, hardware video
++ * coders, etc.
++ *
++ * Such devices often require big memory buffers (a full HD frame
++ * is, for instance, more then 2 mega pixels large, i.e. more than 6
++ * MB of memory), which makes mechanisms such as kmalloc() or
++ * alloc_page() ineffective.
++ *
++ * At the same time, a solution where a big memory region is
++ * reserved for a device is suboptimal since often more memory is
++ * reserved then strictly required and, moreover, the memory is
++ * inaccessible to page system even if device drivers don't use it.
++ *
++ * CMA tries to solve this issue by operating on memory regions
++ * where only movable pages can be allocated from. This way, kernel
++ * can use the memory for pagecache and when device driver requests
++ * it, allocated pages can be migrated.
++ *
++ * Driver usage
++ *
++ * CMA should not be used by the device drivers directly. It is
++ * only a helper framework for dma-mapping subsystem.
++ *
++ * For more information, see kernel-docs in drivers/base/dma-contiguous.c
++ */
++
++#ifdef __KERNEL__
++
++struct cma;
++struct page;
++struct device;
++
++#ifdef CONFIG_CMA
++
++/*
++ * There is always at least global CMA area and a few optional device
++ * private areas configured in kernel .config.
++ */
++#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
++
++extern struct cma *dma_contiguous_default_area;
++
++void dma_contiguous_reserve(phys_addr_t addr_limit);
++int dma_declare_contiguous(struct device *dev, unsigned long size,
++ phys_addr_t base, phys_addr_t limit);
++
++struct page *dma_alloc_from_contiguous(struct device *dev, int count,
++ unsigned int order);
++bool dma_release_from_contiguous(struct device *dev, struct page *pages,
++ int count);
++
++#else
++
++#define MAX_CMA_AREAS (0)
++
++static inline void dma_contiguous_reserve(phys_addr_t limit) { }
++
++static inline
++int dma_declare_contiguous(struct device *dev, unsigned long size,
++ phys_addr_t base, phys_addr_t limit)
++{
++ return -ENOSYS;
++}
++
++static inline
++struct page *dma_alloc_from_contiguous(struct device *dev, int count,
++ unsigned int order)
++{
++ return NULL;
++}
++
++static inline
++bool dma_release_from_contiguous(struct device *dev, struct page *pages,
++ int count)
++{
++ return false;
++}
++
++#endif
++
++#endif
++
++#endif
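As a point of reference for the interface added by this patch, a short sketch of how platform code and a driver might use it is given below. The foo_* names and the chosen sizes are assumptions for illustration; real board code must call dma_declare_contiguous() while the early allocator is still active, and drivers normally reach CMA through the dma-mapping API rather than calling these helpers directly, as the header comment notes.

/*
 * Illustrative sketch only (hypothetical foo_* names): reserving a
 * device-private CMA area early and allocating pages from it later.
 */
#include <linux/dma-contiguous.h>
#include <linux/device.h>
#include <linux/init.h>

/* Board code: reserve an 8 MiB private area for @dev while the early
 * allocator (memblock/bootmem) is still active; base/limit of 0 = any. */
static int __init foo_reserve_cma_area(struct device *dev)
{
	return dma_declare_contiguous(dev, 8 * 1024 * 1024, 0, 0);
}

/* Driver side: 16 contiguous pages, aligned to a 2^4-page boundary. */
static struct page *foo_alloc_frame(struct device *dev)
{
	return dma_alloc_from_contiguous(dev, 16, 4);
}

static void foo_free_frame(struct device *dev, struct page *pages)
{
	dma_release_from_contiguous(dev, pages, 16);
}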
diff --git a/patches.dma-mapping/iommu-core-pass-a-user-provided-token-to-fault-handlers.patch b/patches.dma-mapping/iommu-core-pass-a-user-provided-token-to-fault-handlers.patch
new file mode 100644
index 0000000000000..dde1a1ccb224e
--- /dev/null
+++ b/patches.dma-mapping/iommu-core-pass-a-user-provided-token-to-fault-handlers.patch
@@ -0,0 +1,143 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:04 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:47 +0900
+Subject: [PATCH v2 30/58] iommu/core: pass a user-provided token to fault handlers
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-31-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Ohad Ben-Cohen <ohad@wizery.com>
+
+Sometimes a single IOMMU user may have to deal with several
+different IOMMU devices (e.g. remoteproc).
+
+When an IOMMU fault happens, such users have to regain their
+context in order to deal with the fault.
+
+Users can't use the private fields of either the iommu_domain or the
+IOMMU device, because those are already used by the IOMMU core and the
+low-level driver, respectively.
+
+This patch simply allows users to pass a private token (most
+notably their own context pointer) to iommu_set_fault_handler(),
+and then makes sure it is provided back to the users whenever
+an IOMMU fault happens.
+
+The patch also adapts remoteproc to the new fault handling
+interface, but the real functionality using this (recovery of
+remote processors) will only be added later in a subsequent patch
+set.
+
+Cc: Fernando Guzman Lugo <fernando.lugo@ti.com>
+Signed-off-by: Ohad Ben-Cohen <ohad@wizery.com>
+Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
+(cherry picked from commit 77ca23323594589ac8cba1c8d59bfe7e85d3cb8b)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ drivers/iommu/iommu.c | 5 ++++-
+ drivers/remoteproc/remoteproc_core.c | 4 ++--
+ include/linux/iommu.h | 10 ++++++----
+ 3 files changed, 12 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
+index 2198b2d..8b9ded8 100644
+--- a/drivers/iommu/iommu.c
++++ b/drivers/iommu/iommu.c
+@@ -119,6 +119,7 @@ EXPORT_SYMBOL_GPL(iommu_present);
+ * iommu_set_fault_handler() - set a fault handler for an iommu domain
+ * @domain: iommu domain
+ * @handler: fault handler
++ * @token: user data, will be passed back to the fault handler
+ *
+ * This function should be used by IOMMU users which want to be notified
+ * whenever an IOMMU fault happens.
+@@ -127,11 +128,13 @@ EXPORT_SYMBOL_GPL(iommu_present);
+ * error code otherwise.
+ */
+ void iommu_set_fault_handler(struct iommu_domain *domain,
+- iommu_fault_handler_t handler)
++ iommu_fault_handler_t handler,
++ void *token)
+ {
+ BUG_ON(!domain);
+
+ domain->handler = handler;
++ domain->handler_token = token;
+ }
+ EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
+
+diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
+index 7591b97..0142e52 100644
+--- a/drivers/remoteproc/remoteproc_core.c
++++ b/drivers/remoteproc/remoteproc_core.c
+@@ -78,7 +78,7 @@ typedef int (*rproc_handle_resource_t)(struct rproc *rproc, void *, int avail);
+ * the recovery of the remote processor.
+ */
+ static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev,
+- unsigned long iova, int flags)
++ unsigned long iova, int flags, void *token)
+ {
+ dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags);
+
+@@ -117,7 +117,7 @@ static int rproc_enable_iommu(struct rproc *rproc)
+ return -ENOMEM;
+ }
+
+- iommu_set_fault_handler(domain, rproc_iommu_fault);
++ iommu_set_fault_handler(domain, rproc_iommu_fault, rproc);
+
+ ret = iommu_attach_device(domain, dev);
+ if (ret) {
+diff --git a/include/linux/iommu.h b/include/linux/iommu.h
+index d937580..450293f 100644
+--- a/include/linux/iommu.h
++++ b/include/linux/iommu.h
+@@ -35,12 +35,13 @@ struct iommu_domain;
+ #define IOMMU_FAULT_WRITE 0x1
+
+ typedef int (*iommu_fault_handler_t)(struct iommu_domain *,
+- struct device *, unsigned long, int);
++ struct device *, unsigned long, int, void *);
+
+ struct iommu_domain {
+ struct iommu_ops *ops;
+ void *priv;
+ iommu_fault_handler_t handler;
++ void *handler_token;
+ };
+
+ #define IOMMU_CAP_CACHE_COHERENCY 0x1
+@@ -95,7 +96,7 @@ extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
+ extern int iommu_domain_has_cap(struct iommu_domain *domain,
+ unsigned long cap);
+ extern void iommu_set_fault_handler(struct iommu_domain *domain,
+- iommu_fault_handler_t handler);
++ iommu_fault_handler_t handler, void *token);
+ extern int iommu_device_group(struct device *dev, unsigned int *groupid);
+
+ /**
+@@ -132,7 +133,8 @@ static inline int report_iommu_fault(struct iommu_domain *domain,
+ * invoke it.
+ */
+ if (domain->handler)
+- ret = domain->handler(domain, dev, iova, flags);
++ ret = domain->handler(domain, dev, iova, flags,
++ domain->handler_token);
+
+ return ret;
+ }
+@@ -191,7 +193,7 @@ static inline int domain_has_cap(struct iommu_domain *domain,
+ }
+
+ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
+- iommu_fault_handler_t handler)
++ iommu_fault_handler_t handler, void *token)
+ {
+ }
+
+--
+1.7.5.4
+
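As a concrete illustration of the new signature, a hypothetical IOMMU user could register a handler together with its own context pointer as sketched below; my_ctx and the surrounding setup are assumptions, not code from this series.

/*
 * Illustrative sketch only (hypothetical my_ctx structure): using the
 * token argument of iommu_set_fault_handler() to recover caller context.
 */
#include <linux/iommu.h>
#include <linux/device.h>
#include <linux/errno.h>

struct my_ctx {
	struct device *dev;
	/* ... whatever state the user needs to handle a fault ... */
};

static int my_iommu_fault(struct iommu_domain *domain, struct device *dev,
			  unsigned long iova, int flags, void *token)
{
	struct my_ctx *ctx = token;	/* context passed at registration */

	dev_err(ctx->dev, "iommu fault at 0x%lx, flags 0x%x\n", iova, flags);

	/* Non-zero tells report_iommu_fault() the fault was not handled. */
	return -ENOSYS;
}

static void my_register_handler(struct iommu_domain *domain,
				struct my_ctx *ctx)
{
	/* The third argument is handed back as @token on every fault. */
	iommu_set_fault_handler(domain, my_iommu_fault, ctx);
}

remoteproc uses the same mechanism in the hunk above, passing its rproc pointer as the token so it can regain its context when a fault is reported.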
diff --git a/patches.dma-mapping/mm-clean-up-__count_immobile_pages.patch b/patches.dma-mapping/mm-clean-up-__count_immobile_pages.patch
new file mode 100644
index 0000000000000..01775d794f3a7
--- /dev/null
+++ b/patches.dma-mapping/mm-clean-up-__count_immobile_pages.patch
@@ -0,0 +1,112 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:40 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:51:02 +0900
+Subject: [PATCH v2 45/58] mm: clean up __count_immobile_pages()
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-46-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Minchan Kim <minchan@kernel.org>
+
+The __count_immobile_pages() naming is rather awkward. Choose a
+clearer name and add a comment.
+
+Signed-off-by: Minchan Kim <minchan@kernel.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Michal Hocko <mhocko@suse.cz>
+Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Cc: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+(cherry picked from commit 80934513b230bfcf70265f2ef0fdae89fb391633)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+---
+ mm/page_alloc.c | 34 ++++++++++++++++++----------------
+ 1 files changed, 18 insertions(+), 16 deletions(-)
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index d993631..84f82e3 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5471,26 +5471,28 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
+ }
+
+ /*
+- * This is designed as sub function...plz see page_isolation.c also.
+- * set/clear page block's type to be ISOLATE.
+- * page allocater never alloc memory from ISOLATE block.
++ * This function checks whether pageblock includes unmovable pages or not.
++ * If @count is not zero, it is okay to include less @count unmovable pages
++ *
++ * PageLRU check wihtout isolation or lru_lock could race so that
++ * MIGRATE_MOVABLE block might include unmovable pages. It means you can't
++ * expect this function should be exact.
+ */
+-
+-static int
+-__count_immobile_pages(struct zone *zone, struct page *page, int count)
++static bool
++__has_unmovable_pages(struct zone *zone, struct page *page, int count)
+ {
+ unsigned long pfn, iter, found;
+ int mt;
+
+ /*
+ * For avoiding noise data, lru_add_drain_all() should be called
+- * If ZONE_MOVABLE, the zone never contains immobile pages
++ * If ZONE_MOVABLE, the zone never contains unmovable pages
+ */
+ if (zone_idx(zone) == ZONE_MOVABLE)
+- return true;
++ return false;
+ mt = get_pageblock_migratetype(page);
+ if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
+- return true;
++ return false;
+
+ pfn = page_to_pfn(page);
+ for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
+@@ -5521,9 +5523,9 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
+ * page at boot.
+ */
+ if (found > count)
+- return false;
++ return true;
+ }
+- return true;
++ return false;
+ }
+
+ bool is_pageblock_removable_nolock(struct page *page)
+@@ -5547,7 +5549,7 @@ bool is_pageblock_removable_nolock(struct page *page)
+ zone->zone_start_pfn + zone->spanned_pages <= pfn)
+ return false;
+
+- return __count_immobile_pages(zone, page, 0);
++ return !__has_unmovable_pages(zone, page, 0);
+ }
+
+ int set_migratetype_isolate(struct page *page)
+@@ -5586,12 +5588,12 @@ int set_migratetype_isolate(struct page *page)
+ * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
+ * We just check MOVABLE pages.
+ */
+- if (__count_immobile_pages(zone, page, arg.pages_found))
++ if (!__has_unmovable_pages(zone, page, arg.pages_found))
+ ret = 0;
+-
+ /*
+- * immobile means "not-on-lru" paes. If immobile is larger than
+- * removable-by-driver pages reported by notifier, we'll fail.
++ * Unmovable means "not-on-lru" pages. If Unmovable pages are
++ * larger than removable-by-driver pages reported by notifier,
++ * we'll fail.
+ */
+
+ out:
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/mm-cma-don-t-replace-lowmem-pages-with-highmem.patch b/patches.dma-mapping/mm-cma-don-t-replace-lowmem-pages-with-highmem.patch
new file mode 100644
index 0000000000000..e5025dd5758ae
--- /dev/null
+++ b/patches.dma-mapping/mm-cma-don-t-replace-lowmem-pages-with-highmem.patch
@@ -0,0 +1,70 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:17 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:52 +0900
+Subject: [PATCH v2 35/58] mm: cma: don't replace lowmem pages with highmem
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-36-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Rabin Vincent <rabin@rab.in>
+
+The filesystem layer expects pages in the block device's mapping to not
+be in highmem (the mapping's gfp mask is set in bdget()), but CMA can
+currently replace lowmem pages with highmem pages, leading to crashes in
+filesystem code such as the one below:
+
+ Unable to handle kernel NULL pointer dereference at virtual address 00000400
+ pgd = c0c98000
+ [00000400] *pgd=00c91831, *pte=00000000, *ppte=00000000
+ Internal error: Oops: 817 [#1] PREEMPT SMP ARM
+ CPU: 0 Not tainted (3.5.0-rc5+ #80)
+ PC is at __memzero+0x24/0x80
+ ...
+ Process fsstress (pid: 323, stack limit = 0xc0cbc2f0)
+ Backtrace:
+ [<c010e3f0>] (ext4_getblk+0x0/0x180) from [<c010e58c>] (ext4_bread+0x1c/0x98)
+ [<c010e570>] (ext4_bread+0x0/0x98) from [<c0117944>] (ext4_mkdir+0x160/0x3bc)
+ r4:c15337f0
+ [<c01177e4>] (ext4_mkdir+0x0/0x3bc) from [<c00c29e0>] (vfs_mkdir+0x8c/0x98)
+ [<c00c2954>] (vfs_mkdir+0x0/0x98) from [<c00c2a60>] (sys_mkdirat+0x74/0xac)
+ r6:00000000 r5:c152eb40 r4:000001ff r3:c14b43f0
+ [<c00c29ec>] (sys_mkdirat+0x0/0xac) from [<c00c2ab8>] (sys_mkdir+0x20/0x24)
+ r6:beccdcf0 r5:00074000 r4:beccdbbc
+ [<c00c2a98>] (sys_mkdir+0x0/0x24) from [<c000e3c0>] (ret_fast_syscall+0x0/0x30)
+
+Fix this by replacing only highmem pages with highmem.
+
+Reported-by: Laura Abbott <lauraa@codeaurora.org>
+Signed-off-by: Rabin Vincent <rabin@rab.in>
+Acked-by: Michal Nazarewicz <mina86@mina86.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit 6a6dccba2fdc2a69f1f36b8f1c0acc8598e7221b)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ mm/page_alloc.c | 7 ++++++-
+ 1 files changed, 6 insertions(+), 1 deletions(-)
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index ed85c02..d993631 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5638,7 +5638,12 @@ static struct page *
+ __alloc_contig_migrate_alloc(struct page *page, unsigned long private,
+ int **resultp)
+ {
+- return alloc_page(GFP_HIGHUSER_MOVABLE);
++ gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE;
++
++ if (PageHighMem(page))
++ gfp_mask |= __GFP_HIGHMEM;
++
++ return alloc_page(gfp_mask);
+ }
+
+ /* [start, end) must belong to a single zone. */
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/mm-cma-fix-alignment-requirements-for-contiguous-regions.patch b/patches.dma-mapping/mm-cma-fix-alignment-requirements-for-contiguous-regions.patch
new file mode 100644
index 0000000000000..27276d4fc9369
--- /dev/null
+++ b/patches.dma-mapping/mm-cma-fix-alignment-requirements-for-contiguous-regions.patch
@@ -0,0 +1,44 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:58 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:51:09 +0900
+Subject: [PATCH v2 52/58] mm: cma: fix alignment requirements for contiguous regions
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-53-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+The Contiguous Memory Allocator requires each of its regions to be
+aligned in such a way that it is possible to change the migration type
+for all pageblocks holding it and then isolate a page of the largest
+possible order from the buddy allocator (which is MAX_ORDER-1). This
+patch relaxes the alignment requirement by one order, because MAX_ORDER
+alignment is not really needed.
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+CC: Michal Nazarewicz <mina86@mina86.com>
+Acked-by: Michal Nazarewicz <mina86@mina86.com>
+(cherry picked from commit 7ce9bf1f4785dab0598a19a7fcb0733a18193e4e)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+---
+ drivers/base/dma-contiguous.c | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
+index 78efb03..34d94c7 100644
+--- a/drivers/base/dma-contiguous.c
++++ b/drivers/base/dma-contiguous.c
+@@ -250,7 +250,7 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size,
+ return -EINVAL;
+
+ /* Sanitise input arguments */
+- alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
++ alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+ base = ALIGN(base, alignment);
+ size = ALIGN(size, alignment);
+ limit &= ~(alignment - 1);
+--
+1.7.5.4
+
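As a rough worked example, with the common defaults of 4 KiB pages, MAX_ORDER = 11 and pageblock_order <= 10, the required region alignment drops from PAGE_SIZE << 11 = 8 MiB to PAGE_SIZE << 10 = 4 MiB after this change; the exact values depend on the architecture's page size and order configuration.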
diff --git a/patches.dma-mapping/mm-cma-fix-condition-check-when-setting-global-cma-area.patch b/patches.dma-mapping/mm-cma-fix-condition-check-when-setting-global-cma-area.patch
new file mode 100644
index 0000000000000..a2b787cebf4d9
--- /dev/null
+++ b/patches.dma-mapping/mm-cma-fix-condition-check-when-setting-global-cma-area.patch
@@ -0,0 +1,39 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:18 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:53 +0900
+Subject: [PATCH v2 36/58] mm: cma: fix condition check when setting global cma area
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-37-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+dev_set_cma_area() incorrectly assigned the cma area to the global area
+on the first call due to an incorrect check. This patch fixes the issue.
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+(cherry picked from commit cc2caea5b6152b8ce66dc2bbe83dc72b60612da8)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ include/asm-generic/dma-contiguous.h | 2 +-
+ 1 files changed, 1 insertions(+), 1 deletions(-)
+
+diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
+index c544356..294b1e7 100644
+--- a/include/asm-generic/dma-contiguous.h
++++ b/include/asm-generic/dma-contiguous.h
+@@ -18,7 +18,7 @@ static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
+ {
+ if (dev)
+ dev->cma_area = cma;
+- if (!dev || !dma_contiguous_default_area)
++ if (!dev && !dma_contiguous_default_area)
+ dma_contiguous_default_area = cma;
+ }
+
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/mm-compaction-export-some-of-the-functions.patch b/patches.dma-mapping/mm-compaction-export-some-of-the-functions.patch
new file mode 100644
index 0000000000000..e310166c1c705
--- /dev/null
+++ b/patches.dma-mapping/mm-compaction-export-some-of-the-functions.patch
@@ -0,0 +1,511 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:51:58 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:22 +0900
+Subject: [PATCH v2 05/58] mm: compaction: export some of the functions
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-6-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Michal Nazarewicz <mina86@mina86.com>
+
+This commit exports some of the functions from compaction.c, adding
+their declarations to the internal.h header file so that other mm
+related code can use them.
+
+This forces compaction.c to always be compiled (as opposed to being
+compiled only if CONFIG_COMPACTION is defined), but to avoid
+introducing code that the user did not ask for, part of compaction.c
+is now wrapped in an #ifdef.
+
+Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Mel Gorman <mel@csn.ul.ie>
+Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Tested-by: Rob Clark <rob.clark@linaro.org>
+Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
+Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+Tested-by: Robert Nelson <robertcnelson@gmail.com>
+Tested-by: Barry Song <Baohua.Song@csr.com>
+(cherry picked from commit ff9543fd32060917beb080b1eb2d1d41ec7f39e0)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ mm/Makefile | 3 +-
+ mm/compaction.c | 328 ++++++++++++++++++++++++++-----------------------------
+ mm/internal.h | 33 ++++++
+ 3 files changed, 191 insertions(+), 173 deletions(-)
+
+diff --git a/mm/Makefile b/mm/Makefile
+index 50ec00e..8aada89 100644
+--- a/mm/Makefile
++++ b/mm/Makefile
+@@ -13,7 +13,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
+ readahead.o swap.o truncate.o vmscan.o shmem.o \
+ prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
+ page_isolation.o mm_init.o mmu_context.o percpu.o \
+- $(mmu-y)
++ compaction.o $(mmu-y)
+ obj-y += init-mm.o
+
+ ifdef CONFIG_NO_BOOTMEM
+@@ -32,7 +32,6 @@ obj-$(CONFIG_NUMA) += mempolicy.o
+ obj-$(CONFIG_SPARSEMEM) += sparse.o
+ obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
+ obj-$(CONFIG_SLOB) += slob.o
+-obj-$(CONFIG_COMPACTION) += compaction.o
+ obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
+ obj-$(CONFIG_KSM) += ksm.o
+ obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
+diff --git a/mm/compaction.c b/mm/compaction.c
+index b150a62..dcb3865 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -16,30 +16,11 @@
+ #include <linux/sysfs.h>
+ #include "internal.h"
+
++#if defined CONFIG_COMPACTION || defined CONFIG_CMA
++
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/compaction.h>
+
+-/*
+- * compact_control is used to track pages being migrated and the free pages
+- * they are being migrated to during memory compaction. The free_pfn starts
+- * at the end of a zone and migrate_pfn begins at the start. Movable pages
+- * are moved to the end of a zone during a compaction run and the run
+- * completes when free_pfn <= migrate_pfn
+- */
+-struct compact_control {
+- struct list_head freepages; /* List of free pages to migrate to */
+- struct list_head migratepages; /* List of pages being migrated */
+- unsigned long nr_freepages; /* Number of isolated free pages */
+- unsigned long nr_migratepages; /* Number of pages to migrate */
+- unsigned long free_pfn; /* isolate_freepages search base */
+- unsigned long migrate_pfn; /* isolate_migratepages search base */
+- bool sync; /* Synchronous migration */
+-
+- int order; /* order a direct compactor needs */
+- int migratetype; /* MOVABLE, RECLAIMABLE etc */
+- struct zone *zone;
+-};
+-
+ static unsigned long release_freepages(struct list_head *freelist)
+ {
+ struct page *page, *next;
+@@ -54,6 +35,16 @@ static unsigned long release_freepages(struct list_head *freelist)
+ return count;
+ }
+
++static void map_pages(struct list_head *list)
++{
++ struct page *page;
++
++ list_for_each_entry(page, list, lru) {
++ arch_alloc_page(page, 0);
++ kernel_map_pages(page, 1, 1);
++ }
++}
++
+ /*
+ * Isolate free pages onto a private freelist. Caller must hold zone->lock.
+ * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
+@@ -122,7 +113,7 @@ static unsigned long isolate_freepages_block(unsigned long blockpfn,
+ * (which may be greater then end_pfn if end fell in a middle of
+ * a free page).
+ */
+-static unsigned long
++unsigned long
+ isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
+ {
+ unsigned long isolated, pfn, block_end_pfn, flags;
+@@ -176,127 +167,6 @@ isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
+ return pfn;
+ }
+
+-/* Returns true if the page is within a block suitable for migration to */
+-static bool suitable_migration_target(struct page *page)
+-{
+-
+- int migratetype = get_pageblock_migratetype(page);
+-
+- /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
+- if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
+- return false;
+-
+- /* If the page is a large free page, then allow migration */
+- if (PageBuddy(page) && page_order(page) >= pageblock_order)
+- return true;
+-
+- /* If the block is MIGRATE_MOVABLE, allow migration */
+- if (migratetype == MIGRATE_MOVABLE)
+- return true;
+-
+- /* Otherwise skip the block */
+- return false;
+-}
+-
+-static void map_pages(struct list_head *list)
+-{
+- struct page *page;
+-
+- list_for_each_entry(page, list, lru) {
+- arch_alloc_page(page, 0);
+- kernel_map_pages(page, 1, 1);
+- }
+-}
+-
+-/*
+- * Based on information in the current compact_control, find blocks
+- * suitable for isolating free pages from and then isolate them.
+- */
+-static void isolate_freepages(struct zone *zone,
+- struct compact_control *cc)
+-{
+- struct page *page;
+- unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
+- unsigned long flags;
+- int nr_freepages = cc->nr_freepages;
+- struct list_head *freelist = &cc->freepages;
+-
+- /*
+- * Initialise the free scanner. The starting point is where we last
+- * scanned from (or the end of the zone if starting). The low point
+- * is the end of the pageblock the migration scanner is using.
+- */
+- pfn = cc->free_pfn;
+- low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+-
+- /*
+- * Take care that if the migration scanner is at the end of the zone
+- * that the free scanner does not accidentally move to the next zone
+- * in the next isolation cycle.
+- */
+- high_pfn = min(low_pfn, pfn);
+-
+- zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+-
+- /*
+- * Isolate free pages until enough are available to migrate the
+- * pages on cc->migratepages. We stop searching if the migrate
+- * and free page scanners meet or enough free pages are isolated.
+- */
+- for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
+- pfn -= pageblock_nr_pages) {
+- unsigned long isolated;
+-
+- if (!pfn_valid(pfn))
+- continue;
+-
+- /*
+- * Check for overlapping nodes/zones. It's possible on some
+- * configurations to have a setup like
+- * node0 node1 node0
+- * i.e. it's possible that all pages within a zones range of
+- * pages do not belong to a single zone.
+- */
+- page = pfn_to_page(pfn);
+- if (page_zone(page) != zone)
+- continue;
+-
+- /* Check the block is suitable for migration */
+- if (!suitable_migration_target(page))
+- continue;
+-
+- /*
+- * Found a block suitable for isolating free pages from. Now
+- * we disabled interrupts, double check things are ok and
+- * isolate the pages. This is to minimise the time IRQs
+- * are disabled
+- */
+- isolated = 0;
+- spin_lock_irqsave(&zone->lock, flags);
+- if (suitable_migration_target(page)) {
+- end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
+- isolated = isolate_freepages_block(pfn, end_pfn,
+- freelist, false);
+- nr_freepages += isolated;
+- }
+- spin_unlock_irqrestore(&zone->lock, flags);
+-
+- /*
+- * Record the highest PFN we isolated pages from. When next
+- * looking for free pages, the search will restart here as
+- * page migration may have returned some pages to the allocator
+- */
+- if (isolated)
+- high_pfn = max(high_pfn, pfn);
+- }
+-
+- /* split_free_page does not map the pages */
+- map_pages(freelist);
+-
+- cc->free_pfn = high_pfn;
+- cc->nr_freepages = nr_freepages;
+-}
+-
+ /* Update the number of anon and file isolated pages in the zone */
+ static void acct_isolated(struct zone *zone, struct compact_control *cc)
+ {
+@@ -325,13 +195,6 @@ static bool too_many_isolated(struct zone *zone)
+ return isolated > (inactive + active) / 2;
+ }
+
+-/* possible outcome of isolate_migratepages */
+-typedef enum {
+- ISOLATE_ABORT, /* Abort compaction now */
+- ISOLATE_NONE, /* No pages isolated, continue scanning */
+- ISOLATE_SUCCESS, /* Pages isolated, migrate */
+-} isolate_migrate_t;
+-
+ /**
+ * isolate_migratepages_range() - isolate all migrate-able pages in range.
+ * @zone: Zone pages are in.
+@@ -351,7 +214,7 @@ typedef enum {
+ * does not modify any cc's fields, in particular it does not modify
+ * (or read for that matter) cc->migrate_pfn.
+ */
+-static unsigned long
++unsigned long
+ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+ unsigned long low_pfn, unsigned long end_pfn)
+ {
+@@ -487,35 +350,118 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+ return low_pfn;
+ }
+
++#endif /* CONFIG_COMPACTION || CONFIG_CMA */
++#ifdef CONFIG_COMPACTION
++
++/* Returns true if the page is within a block suitable for migration to */
++static bool suitable_migration_target(struct page *page)
++{
++
++ int migratetype = get_pageblock_migratetype(page);
++
++ /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
++ if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
++ return false;
++
++ /* If the page is a large free page, then allow migration */
++ if (PageBuddy(page) && page_order(page) >= pageblock_order)
++ return true;
++
++ /* If the block is MIGRATE_MOVABLE, allow migration */
++ if (migratetype == MIGRATE_MOVABLE)
++ return true;
++
++ /* Otherwise skip the block */
++ return false;
++}
++
+ /*
+- * Isolate all pages that can be migrated from the block pointed to by
+- * the migrate scanner within compact_control.
++ * Based on information in the current compact_control, find blocks
++ * suitable for isolating free pages from and then isolate them.
+ */
+-static isolate_migrate_t isolate_migratepages(struct zone *zone,
+- struct compact_control *cc)
++static void isolate_freepages(struct zone *zone,
++ struct compact_control *cc)
+ {
+- unsigned long low_pfn, end_pfn;
++ struct page *page;
++ unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
++ unsigned long flags;
++ int nr_freepages = cc->nr_freepages;
++ struct list_head *freelist = &cc->freepages;
+
+- /* Do not scan outside zone boundaries */
+- low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
++ /*
++ * Initialise the free scanner. The starting point is where we last
++ * scanned from (or the end of the zone if starting). The low point
++ * is the end of the pageblock the migration scanner is using.
++ */
++ pfn = cc->free_pfn;
++ low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+
+- /* Only scan within a pageblock boundary */
+- end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
++ /*
++ * Take care that if the migration scanner is at the end of the zone
++ * that the free scanner does not accidentally move to the next zone
++ * in the next isolation cycle.
++ */
++ high_pfn = min(low_pfn, pfn);
+
+- /* Do not cross the free scanner or scan within a memory hole */
+- if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
+- cc->migrate_pfn = end_pfn;
+- return ISOLATE_NONE;
+- }
++ zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+
+- /* Perform the isolation */
+- low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
+- if (!low_pfn)
+- return ISOLATE_ABORT;
++ /*
++ * Isolate free pages until enough are available to migrate the
++ * pages on cc->migratepages. We stop searching if the migrate
++ * and free page scanners meet or enough free pages are isolated.
++ */
++ for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
++ pfn -= pageblock_nr_pages) {
++ unsigned long isolated;
+
+- cc->migrate_pfn = low_pfn;
++ if (!pfn_valid(pfn))
++ continue;
+
+- return ISOLATE_SUCCESS;
++ /*
++ * Check for overlapping nodes/zones. It's possible on some
++ * configurations to have a setup like
++ * node0 node1 node0
++ * i.e. it's possible that all pages within a zones range of
++ * pages do not belong to a single zone.
++ */
++ page = pfn_to_page(pfn);
++ if (page_zone(page) != zone)
++ continue;
++
++ /* Check the block is suitable for migration */
++ if (!suitable_migration_target(page))
++ continue;
++
++ /*
++ * Found a block suitable for isolating free pages from. Now
++ * we disabled interrupts, double check things are ok and
++ * isolate the pages. This is to minimise the time IRQs
++ * are disabled
++ */
++ isolated = 0;
++ spin_lock_irqsave(&zone->lock, flags);
++ if (suitable_migration_target(page)) {
++ end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
++ isolated = isolate_freepages_block(pfn, end_pfn,
++ freelist, false);
++ nr_freepages += isolated;
++ }
++ spin_unlock_irqrestore(&zone->lock, flags);
++
++ /*
++ * Record the highest PFN we isolated pages from. When next
++ * looking for free pages, the search will restart here as
++ * page migration may have returned some pages to the allocator
++ */
++ if (isolated)
++ high_pfn = max(high_pfn, pfn);
++ }
++
++ /* split_free_page does not map the pages */
++ map_pages(freelist);
++
++ cc->free_pfn = high_pfn;
++ cc->nr_freepages = nr_freepages;
+ }
+
+ /*
+@@ -564,6 +510,44 @@ static void update_nr_listpages(struct compact_control *cc)
+ cc->nr_freepages = nr_freepages;
+ }
+
++/* possible outcome of isolate_migratepages */
++typedef enum {
++ ISOLATE_ABORT, /* Abort compaction now */
++ ISOLATE_NONE, /* No pages isolated, continue scanning */
++ ISOLATE_SUCCESS, /* Pages isolated, migrate */
++} isolate_migrate_t;
++
++/*
++ * Isolate all pages that can be migrated from the block pointed to by
++ * the migrate scanner within compact_control.
++ */
++static isolate_migrate_t isolate_migratepages(struct zone *zone,
++ struct compact_control *cc)
++{
++ unsigned long low_pfn, end_pfn;
++
++ /* Do not scan outside zone boundaries */
++ low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
++
++ /* Only scan within a pageblock boundary */
++ end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
++
++ /* Do not cross the free scanner or scan within a memory hole */
++ if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
++ cc->migrate_pfn = end_pfn;
++ return ISOLATE_NONE;
++ }
++
++ /* Perform the isolation */
++ low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
++ if (!low_pfn)
++ return ISOLATE_ABORT;
++
++ cc->migrate_pfn = low_pfn;
++
++ return ISOLATE_SUCCESS;
++}
++
+ static int compact_finished(struct zone *zone,
+ struct compact_control *cc)
+ {
+@@ -913,3 +897,5 @@ void compaction_unregister_node(struct node *node)
+ return device_remove_file(&node->dev, &dev_attr_compact);
+ }
+ #endif /* CONFIG_SYSFS && CONFIG_NUMA */
++
++#endif /* CONFIG_COMPACTION */
+diff --git a/mm/internal.h b/mm/internal.h
+index 2189af4..aee4761 100644
+--- a/mm/internal.h
++++ b/mm/internal.h
+@@ -100,6 +100,39 @@ extern void prep_compound_page(struct page *page, unsigned long order);
+ extern bool is_free_buddy_page(struct page *page);
+ #endif
+
++#if defined CONFIG_COMPACTION || defined CONFIG_CMA
++
++/*
++ * in mm/compaction.c
++ */
++/*
++ * compact_control is used to track pages being migrated and the free pages
++ * they are being migrated to during memory compaction. The free_pfn starts
++ * at the end of a zone and migrate_pfn begins at the start. Movable pages
++ * are moved to the end of a zone during a compaction run and the run
++ * completes when free_pfn <= migrate_pfn
++ */
++struct compact_control {
++ struct list_head freepages; /* List of free pages to migrate to */
++ struct list_head migratepages; /* List of pages being migrated */
++ unsigned long nr_freepages; /* Number of isolated free pages */
++ unsigned long nr_migratepages; /* Number of pages to migrate */
++ unsigned long free_pfn; /* isolate_freepages search base */
++ unsigned long migrate_pfn; /* isolate_migratepages search base */
++ bool sync; /* Synchronous migration */
++
++ int order; /* order a direct compactor needs */
++ int migratetype; /* MOVABLE, RECLAIMABLE etc */
++ struct zone *zone;
++};
++
++unsigned long
++isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn);
++unsigned long
++isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
++ unsigned long low_pfn, unsigned long end_pfn);
++
++#endif
+
+ /*
+ * function for dealing with page's order in buddy system.
+--
+1.7.5.4
+
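+As a side note for readers of the compact_control comment added above:
+the two scanners converge from opposite ends of the zone.  A trivial
+stand-alone model (not kernel code; the block size and zone bounds are
+made up) of that convergence:
+
+#include <stdio.h>
+
+#define BLOCK_PFNS 512UL                /* stand-in for pageblock_nr_pages */
+
+int main(void)
+{
+        unsigned long zone_start = 0, zone_end = 16 * BLOCK_PFNS;
+        unsigned long migrate_pfn = zone_start;
+        unsigned long free_pfn = zone_end;
+
+        while (free_pfn > migrate_pfn) {
+                migrate_pfn += BLOCK_PFNS;      /* migrate scanner advances */
+                free_pfn -= BLOCK_PFNS;         /* free scanner retreats */
+                printf("migrate_pfn=%lu free_pfn=%lu\n", migrate_pfn, free_pfn);
+        }
+        printf("scanners met: compaction run complete\n");
+        return 0;
+}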
diff --git a/patches.dma-mapping/mm-compaction-introduce-isolate_freepages_range.patch b/patches.dma-mapping/mm-compaction-introduce-isolate_freepages_range.patch
new file mode 100644
index 0000000000000..2a09267a703d5
--- /dev/null
+++ b/patches.dma-mapping/mm-compaction-introduce-isolate_freepages_range.patch
@@ -0,0 +1,205 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:51:55 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:21 +0900
+Subject: [PATCH v2 04/58] mm: compaction: introduce isolate_freepages_range()
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-5-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Michal Nazarewicz <mina86@mina86.com>
+
+This commit introduces the isolate_freepages_range() function, which
+generalises isolate_freepages_block() so that it can be used on
+arbitrary PFN ranges.
+
+isolate_freepages_block() is left with only minor changes.
+
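+For illustration only, here is a minimal user-space sketch of the
+"strict" all-or-nothing behaviour described above; it is not the
+kernel implementation, and the page_free[] bitmap, the isolate_range()
+helper and the pfn values are invented for the example:
+
+#include <stdbool.h>
+#include <stdio.h>
+
+#define NPAGES 64
+
+static bool page_free[NPAGES];
+
+/* Strict scan: isolate [start, end), or undo and return 0 on any hole. */
+static unsigned long isolate_range(unsigned long start, unsigned long end)
+{
+        unsigned long pfn;
+
+        for (pfn = start; pfn < end; pfn++) {
+                if (!page_free[pfn]) {
+                        while (pfn > start)     /* undo partial isolation */
+                                page_free[--pfn] = true;
+                        return 0;
+                }
+                page_free[pfn] = false;         /* "isolate" the page */
+        }
+        return end;     /* one-past-the-last pfn isolated */
+}
+
+int main(void)
+{
+        for (unsigned long i = 0; i < NPAGES; i++)
+                page_free[i] = (i != 40);       /* one non-free page at 40 */
+
+        printf("[0,32):  %lu\n", isolate_range(0, 32));  /* succeeds */
+        printf("[32,64): %lu\n", isolate_range(32, 64)); /* aborts on 40 */
+        return 0;
+}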
+Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Mel Gorman <mel@csn.ul.ie>
+Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Tested-by: Rob Clark <rob.clark@linaro.org>
+Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
+Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+Tested-by: Robert Nelson <robertcnelson@gmail.com>
+Tested-by: Barry Song <Baohua.Song@csr.com>
+(cherry picked from commit 85aa125f001f87f96a72e9e6ee515490843b1202)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ mm/compaction.c | 111 ++++++++++++++++++++++++++++++++++++++++++++++---------
+ 1 files changed, 93 insertions(+), 18 deletions(-)
+
+diff --git a/mm/compaction.c b/mm/compaction.c
+index f740488..b150a62 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -54,24 +54,20 @@ static unsigned long release_freepages(struct list_head *freelist)
+ return count;
+ }
+
+-/* Isolate free pages onto a private freelist. Must hold zone->lock */
+-static unsigned long isolate_freepages_block(struct zone *zone,
+- unsigned long blockpfn,
+- struct list_head *freelist)
++/*
++ * Isolate free pages onto a private freelist. Caller must hold zone->lock.
++ * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
++ * pages inside of the pageblock (even though it may still end up isolating
++ * some pages).
++ */
++static unsigned long isolate_freepages_block(unsigned long blockpfn,
++ unsigned long end_pfn,
++ struct list_head *freelist,
++ bool strict)
+ {
+- unsigned long zone_end_pfn, end_pfn;
+ int nr_scanned = 0, total_isolated = 0;
+ struct page *cursor;
+
+- /* Get the last PFN we should scan for free pages at */
+- zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+- end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);
+-
+- /* Find the first usable PFN in the block to initialse page cursor */
+- for (; blockpfn < end_pfn; blockpfn++) {
+- if (pfn_valid_within(blockpfn))
+- break;
+- }
+ cursor = pfn_to_page(blockpfn);
+
+ /* Isolate free pages. This assumes the block is valid */
+@@ -79,15 +75,23 @@ static unsigned long isolate_freepages_block(struct zone *zone,
+ int isolated, i;
+ struct page *page = cursor;
+
+- if (!pfn_valid_within(blockpfn))
++ if (!pfn_valid_within(blockpfn)) {
++ if (strict)
++ return 0;
+ continue;
++ }
+ nr_scanned++;
+
+- if (!PageBuddy(page))
++ if (!PageBuddy(page)) {
++ if (strict)
++ return 0;
+ continue;
++ }
+
+ /* Found a free page, break it into order-0 pages */
+ isolated = split_free_page(page);
++ if (!isolated && strict)
++ return 0;
+ total_isolated += isolated;
+ for (i = 0; i < isolated; i++) {
+ list_add(&page->lru, freelist);
+@@ -105,6 +109,73 @@ static unsigned long isolate_freepages_block(struct zone *zone,
+ return total_isolated;
+ }
+
++/**
++ * isolate_freepages_range() - isolate free pages.
++ * @start_pfn: The first PFN to start isolating.
++ * @end_pfn: The one-past-last PFN.
++ *
++ * Non-free pages, invalid PFNs, or zone boundaries within the
++ * [start_pfn, end_pfn) range are considered errors, cause function to
++ * undo its actions and return zero.
++ *
++ * Otherwise, function returns one-past-the-last PFN of isolated page
++ * (which may be greater then end_pfn if end fell in a middle of
++ * a free page).
++ */
++static unsigned long
++isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
++{
++ unsigned long isolated, pfn, block_end_pfn, flags;
++ struct zone *zone = NULL;
++ LIST_HEAD(freelist);
++
++ if (pfn_valid(start_pfn))
++ zone = page_zone(pfn_to_page(start_pfn));
++
++ for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
++ if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
++ break;
++
++ /*
++ * On subsequent iterations ALIGN() is actually not needed,
++ * but we keep it that we not to complicate the code.
++ */
++ block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
++ block_end_pfn = min(block_end_pfn, end_pfn);
++
++ spin_lock_irqsave(&zone->lock, flags);
++ isolated = isolate_freepages_block(pfn, block_end_pfn,
++ &freelist, true);
++ spin_unlock_irqrestore(&zone->lock, flags);
++
++ /*
++ * In strict mode, isolate_freepages_block() returns 0 if
++ * there are any holes in the block (ie. invalid PFNs or
++ * non-free pages).
++ */
++ if (!isolated)
++ break;
++
++ /*
++ * If we managed to isolate pages, it is always (1 << n) *
++ * pageblock_nr_pages for some non-negative n. (Max order
++ * page may span two pageblocks).
++ */
++ }
++
++ /* split_free_page does not map the pages */
++ map_pages(&freelist);
++
++ if (pfn < end_pfn) {
++ /* Loop terminated early, cleanup. */
++ release_freepages(&freelist);
++ return 0;
++ }
++
++ /* We don't use freelists for anything. */
++ return pfn;
++}
++
+ /* Returns true if the page is within a block suitable for migration to */
+ static bool suitable_migration_target(struct page *page)
+ {
+@@ -145,7 +216,7 @@ static void isolate_freepages(struct zone *zone,
+ struct compact_control *cc)
+ {
+ struct page *page;
+- unsigned long high_pfn, low_pfn, pfn;
++ unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
+ unsigned long flags;
+ int nr_freepages = cc->nr_freepages;
+ struct list_head *freelist = &cc->freepages;
+@@ -165,6 +236,8 @@ static void isolate_freepages(struct zone *zone,
+ */
+ high_pfn = min(low_pfn, pfn);
+
++ zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
++
+ /*
+ * Isolate free pages until enough are available to migrate the
+ * pages on cc->migratepages. We stop searching if the migrate
+@@ -201,7 +274,9 @@ static void isolate_freepages(struct zone *zone,
+ isolated = 0;
+ spin_lock_irqsave(&zone->lock, flags);
+ if (suitable_migration_target(page)) {
+- isolated = isolate_freepages_block(zone, pfn, freelist);
++ end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
++ isolated = isolate_freepages_block(pfn, end_pfn,
++ freelist, false);
+ nr_freepages += isolated;
+ }
+ spin_unlock_irqrestore(&zone->lock, flags);
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/mm-compaction-introduce-isolate_migratepages_range.patch b/patches.dma-mapping/mm-compaction-introduce-isolate_migratepages_range.patch
new file mode 100644
index 0000000000000..eadeb5ed0cce0
--- /dev/null
+++ b/patches.dma-mapping/mm-compaction-introduce-isolate_migratepages_range.patch
@@ -0,0 +1,152 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:51:51 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:19 +0900
+Subject: [PATCH v2 02/58] mm: compaction: introduce isolate_migratepages_range()
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-3-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Michal Nazarewicz <mina86@mina86.com>
+
+This commit introduces the isolate_migratepages_range() function, which
+extracts functionality from isolate_migratepages() so that it can be
+used on arbitrary PFN ranges.
+
+The isolate_migratepages() function is implemented as a simple wrapper
+around isolate_migratepages_range().
+
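+As a rough illustration (not kernel code), the new helper's calling
+convention can be modelled in user space as "return the next position
+to scan, or 0 to abort"; scan_chunk(), CHUNK and the bounds below are
+made up for the example:
+
+#include <stdio.h>
+
+#define CHUNK 128UL
+
+/* Pretend scanner: returns the next pfn to scan, or 0 to signal abort. */
+static unsigned long scan_chunk(unsigned long pfn, unsigned long end)
+{
+        unsigned long next = pfn + CHUNK;
+
+        return next > end ? end : next;
+}
+
+int main(void)
+{
+        unsigned long pfn = 0, end = 1000;
+
+        while (pfn < end) {
+                pfn = scan_chunk(pfn, end);
+                if (!pfn) {             /* ISOLATE_ABORT in the wrapper */
+                        fprintf(stderr, "aborted\n");
+                        return 1;
+                }
+                printf("scanned up to pfn %lu\n", pfn);
+        }
+        return 0;
+}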
+Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Mel Gorman <mel@csn.ul.ie>
+Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Tested-by: Rob Clark <rob.clark@linaro.org>
+Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
+Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+Tested-by: Robert Nelson <robertcnelson@gmail.com>
+Tested-by: Barry Song <Baohua.Song@csr.com>
+(cherry picked from commit 2fe86e0004076128f05d5a774b5c9c03d9dc3de2)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ mm/compaction.c | 75 +++++++++++++++++++++++++++++++++++++++---------------
+ 1 files changed, 54 insertions(+), 21 deletions(-)
+
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 459b0ab..0304ff6 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -250,31 +250,34 @@ typedef enum {
+ ISOLATE_SUCCESS, /* Pages isolated, migrate */
+ } isolate_migrate_t;
+
+-/*
+- * Isolate all pages that can be migrated from the block pointed to by
+- * the migrate scanner within compact_control.
++/**
++ * isolate_migratepages_range() - isolate all migrate-able pages in range.
++ * @zone: Zone pages are in.
++ * @cc: Compaction control structure.
++ * @low_pfn: The first PFN of the range.
++ * @end_pfn: The one-past-the-last PFN of the range.
++ *
++ * Isolate all pages that can be migrated from the range specified by
++ * [low_pfn, end_pfn). Returns zero if there is a fatal signal
++ * pending), otherwise PFN of the first page that was not scanned
++ * (which may be both less, equal to or more then end_pfn).
++ *
++ * Assumes that cc->migratepages is empty and cc->nr_migratepages is
++ * zero.
++ *
++ * Apart from cc->migratepages and cc->nr_migratetypes this function
++ * does not modify any cc's fields, in particular it does not modify
++ * (or read for that matter) cc->migrate_pfn.
+ */
+-static isolate_migrate_t isolate_migratepages(struct zone *zone,
+- struct compact_control *cc)
++static unsigned long
++isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
++ unsigned long low_pfn, unsigned long end_pfn)
+ {
+- unsigned long low_pfn, end_pfn;
+ unsigned long last_pageblock_nr = 0, pageblock_nr;
+ unsigned long nr_scanned = 0, nr_isolated = 0;
+ struct list_head *migratelist = &cc->migratepages;
+ isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE;
+
+- /* Do not scan outside zone boundaries */
+- low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
+-
+- /* Only scan within a pageblock boundary */
+- end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
+-
+- /* Do not cross the free scanner or scan within a memory hole */
+- if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
+- cc->migrate_pfn = end_pfn;
+- return ISOLATE_NONE;
+- }
+-
+ /*
+ * Ensure that there are not too many pages isolated from the LRU
+ * list by either parallel reclaimers or compaction. If there are,
+@@ -283,12 +286,12 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
+ while (unlikely(too_many_isolated(zone))) {
+ /* async migration should just abort */
+ if (!cc->sync)
+- return ISOLATE_ABORT;
++ return 0;
+
+ congestion_wait(BLK_RW_ASYNC, HZ/10);
+
+ if (fatal_signal_pending(current))
+- return ISOLATE_ABORT;
++ return 0;
+ }
+
+ /* Time to isolate some pages for migration */
+@@ -396,10 +399,40 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
+ acct_isolated(zone, cc);
+
+ spin_unlock_irq(&zone->lru_lock);
+- cc->migrate_pfn = low_pfn;
+
+ trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
+
++ return low_pfn;
++}
++
++/*
++ * Isolate all pages that can be migrated from the block pointed to by
++ * the migrate scanner within compact_control.
++ */
++static isolate_migrate_t isolate_migratepages(struct zone *zone,
++ struct compact_control *cc)
++{
++ unsigned long low_pfn, end_pfn;
++
++ /* Do not scan outside zone boundaries */
++ low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
++
++ /* Only scan within a pageblock boundary */
++ end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
++
++ /* Do not cross the free scanner or scan within a memory hole */
++ if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
++ cc->migrate_pfn = end_pfn;
++ return ISOLATE_NONE;
++ }
++
++ /* Perform the isolation */
++ low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
++ if (!low_pfn)
++ return ISOLATE_ABORT;
++
++ cc->migrate_pfn = low_pfn;
++
+ return ISOLATE_SUCCESS;
+ }
+
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/mm-compaction-introduce-map_pages.patch b/patches.dma-mapping/mm-compaction-introduce-map_pages.patch
new file mode 100644
index 0000000000000..f081053d17852
--- /dev/null
+++ b/patches.dma-mapping/mm-compaction-introduce-map_pages.patch
@@ -0,0 +1,65 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:51:53 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:20 +0900
+Subject: [PATCH v2 03/58] mm: compaction: introduce map_pages()
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-4-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Michal Nazarewicz <mina86@mina86.com>
+
+This commit creates a map_pages() function which maps pages freed
+using split_free_page(). This merely moves some code from
+isolate_freepages() so that it can be reused in other places.
+
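+The change is a pure code-motion refactor.  As a loose user-space
+analogy (not the kernel list_head API; struct node and touch() are
+invented), the per-element loop body is hoisted into a helper so that
+other callers can reuse it:
+
+#include <stdio.h>
+
+struct node {
+        int id;
+        struct node *next;
+};
+
+static void touch(struct node *n)       /* stands in for the per-page work */
+{
+        printf("mapping page %d\n", n->id);
+}
+
+/* Equivalent of map_pages(): apply the work to every element of a list. */
+static void map_all(struct node *head)
+{
+        for (struct node *n = head; n; n = n->next)
+                touch(n);
+}
+
+int main(void)
+{
+        struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
+
+        map_all(&a);
+        return 0;
+}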
+Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Mel Gorman <mel@csn.ul.ie>
+Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Tested-by: Robert Nelson <robertcnelson@gmail.com>
+Tested-by: Barry Song <Baohua.Song@csr.com>
+(cherry picked from commit 03d44192f69a45d780ba124f691e76020a44ebae)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ mm/compaction.c | 15 +++++++++++----
+ 1 files changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/mm/compaction.c b/mm/compaction.c
+index 0304ff6..f740488 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -127,6 +127,16 @@ static bool suitable_migration_target(struct page *page)
+ return false;
+ }
+
++static void map_pages(struct list_head *list)
++{
++ struct page *page;
++
++ list_for_each_entry(page, list, lru) {
++ arch_alloc_page(page, 0);
++ kernel_map_pages(page, 1, 1);
++ }
++}
++
+ /*
+ * Based on information in the current compact_control, find blocks
+ * suitable for isolating free pages from and then isolate them.
+@@ -206,10 +216,7 @@ static void isolate_freepages(struct zone *zone,
+ }
+
+ /* split_free_page does not map the pages */
+- list_for_each_entry(page, freelist, lru) {
+- arch_alloc_page(page, 0);
+- kernel_map_pages(page, 1, 1);
+- }
++ map_pages(freelist);
+
+ cc->free_pfn = high_pfn;
+ cc->nr_freepages = nr_freepages;
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/mm-extract-reclaim-code-from-__alloc_pages_direct_reclaim.patch b/patches.dma-mapping/mm-extract-reclaim-code-from-__alloc_pages_direct_reclaim.patch
new file mode 100644
index 0000000000000..803bd3bbd023f
--- /dev/null
+++ b/patches.dma-mapping/mm-extract-reclaim-code-from-__alloc_pages_direct_reclaim.patch
@@ -0,0 +1,92 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:12 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:28 +0900
+Subject: [PATCH v2 11/58] mm: extract reclaim code from __alloc_pages_direct_reclaim()
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-12-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+This patch extracts the common reclaim code from the
+__alloc_pages_direct_reclaim() function into a separate function,
+__perform_reclaim(), which can later be used by alloc_contig_range().
+
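+As a hedged illustration (invented names, not the kernel code), the
+shape after this patch can be sketched in user space as a reclaim
+helper that returns a progress count, with the caller keeping the
+"no progress, give up" policy to itself:
+
+#include <stdio.h>
+
+static long perform_reclaim(void)       /* models __perform_reclaim() */
+{
+        static long budget = 3;
+
+        return budget-- > 0 ? 10 : 0;   /* pages "reclaimed" this round */
+}
+
+static int allocate_with_reclaim(void)
+{
+        long progress = perform_reclaim();
+
+        if (!progress)
+                return -1;              /* nothing reclaimed, give up */
+        printf("reclaimed %ld pages, retrying allocation\n", progress);
+        return 0;
+}
+
+int main(void)
+{
+        while (allocate_with_reclaim() == 0)
+                ;
+        printf("out of reclaimable memory\n");
+        return 0;
+}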
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
+Cc: Michal Nazarewicz <mina86@mina86.com>
+Acked-by: Mel Gorman <mel@csn.ul.ie>
+Tested-by: Rob Clark <rob.clark@linaro.org>
+Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
+Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+Tested-by: Robert Nelson <robertcnelson@gmail.com>
+Tested-by: Barry Song <Baohua.Song@csr.com>
+(cherry picked from commit bba9071087108d3de70bea274e35064cc480487b)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ mm/page_alloc.c | 30 +++++++++++++++++++++---------
+ 1 files changed, 21 insertions(+), 9 deletions(-)
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 8be37bc..4615531 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -2130,16 +2130,13 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
+ }
+ #endif /* CONFIG_COMPACTION */
+
+-/* The really slow allocator path where we enter direct reclaim */
+-static inline struct page *
+-__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
+- struct zonelist *zonelist, enum zone_type high_zoneidx,
+- nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+- int migratetype, unsigned long *did_some_progress)
++/* Perform direct synchronous page reclaim */
++static int
++__perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
++ nodemask_t *nodemask)
+ {
+- struct page *page = NULL;
+ struct reclaim_state reclaim_state;
+- bool drained = false;
++ int progress;
+
+ cond_resched();
+
+@@ -2150,7 +2147,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
+ reclaim_state.reclaimed_slab = 0;
+ current->reclaim_state = &reclaim_state;
+
+- *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
++ progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
+
+ current->reclaim_state = NULL;
+ lockdep_clear_current_reclaim_state();
+@@ -2158,6 +2155,21 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
+
+ cond_resched();
+
++ return progress;
++}
++
++/* The really slow allocator path where we enter direct reclaim */
++static inline struct page *
++__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
++ struct zonelist *zonelist, enum zone_type high_zoneidx,
++ nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
++ int migratetype, unsigned long *did_some_progress)
++{
++ struct page *page = NULL;
++ bool drained = false;
++
++ *did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
++ nodemask);
+ if (unlikely(!(*did_some_progress)))
+ return NULL;
+
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/mm-factor-out-memory-isolate-functions.patch b/patches.dma-mapping/mm-factor-out-memory-isolate-functions.patch
new file mode 100644
index 0000000000000..4f583cafaf0f0
--- /dev/null
+++ b/patches.dma-mapping/mm-factor-out-memory-isolate-functions.patch
@@ -0,0 +1,351 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:43 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:51:03 +0900
+Subject: [PATCH v2 46/58] mm: factor out memory isolate functions
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-47-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Minchan Kim <minchan@kernel.org>
+
+mm/page_alloc.c has some memory isolation functions, but they are used only
+when CONFIG_{CMA|MEMORY_HOTPLUG|MEMORY_FAILURE} is enabled. So let's make
+them configurable via a new CONFIG_MEMORY_ISOLATION option, which reduces
+the binary size and lets us check simply for CONFIG_MEMORY_ISOLATION rather
+than for "defined CONFIG_{CMA|MEMORY_HOTPLUG|MEMORY_FAILURE}".
+
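+For illustration only (a stand-alone sketch, not kernel code), the
+consolidated symbol means call sites compile against one #ifdef
+instead of repeating the three-way check; CONFIG_CMA is simply
+hard-coded here to stand in for a .config that selects the option:
+
+#include <stdio.h>
+
+#define CONFIG_CMA 1                    /* pretend .config: CMA selects it */
+#ifdef CONFIG_CMA
+#define CONFIG_MEMORY_ISOLATION 1
+#endif
+
+static void isolate_pageblock(void)
+{
+#ifdef CONFIG_MEMORY_ISOLATION
+        printf("isolation code compiled in\n");
+#else
+        printf("isolation code compiled out\n");
+#endif
+}
+
+int main(void)
+{
+        isolate_pageblock();
+        return 0;
+}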
+Signed-off-by: Minchan Kim <minchan@kernel.org>
+Cc: Andi Kleen <andi@firstfloor.org>
+Cc: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Michal Hocko <mhocko@suse.cz>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+(cherry picked from commit ee6f509c3274014d1f52e7a7a10aee9f85393c5e)
+
+Conflicts:
+
+ mm/Makefile
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+---
+ drivers/base/Kconfig | 1 +
+ include/linux/page-isolation.h | 13 ++++--
+ mm/Kconfig | 5 ++
+ mm/Makefile | 4 +-
+ mm/page_alloc.c | 80 ++--------------------------------------
+ mm/page_isolation.c | 71 +++++++++++++++++++++++++++++++++++
+ 6 files changed, 93 insertions(+), 81 deletions(-)
+
+diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
+index 9b21469..08b4c52 100644
+--- a/drivers/base/Kconfig
++++ b/drivers/base/Kconfig
+@@ -196,6 +196,7 @@ config CMA
+ bool "Contiguous Memory Allocator (EXPERIMENTAL)"
+ depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL
+ select MIGRATION
++ select MEMORY_ISOLATION
+ help
+ This enables the Contiguous Memory Allocator which allows drivers
+ to allocate big physically-contiguous blocks of memory for use with
+diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
+index 3bdcab3..105077a 100644
+--- a/include/linux/page-isolation.h
++++ b/include/linux/page-isolation.h
+@@ -1,6 +1,11 @@
+ #ifndef __LINUX_PAGEISOLATION_H
+ #define __LINUX_PAGEISOLATION_H
+
++
++bool has_unmovable_pages(struct zone *zone, struct page *page, int count);
++void set_pageblock_migratetype(struct page *page, int migratetype);
++int move_freepages_block(struct zone *zone, struct page *page,
++ int migratetype);
+ /*
+ * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
+ * If specified range includes migrate types other than MOVABLE or CMA,
+@@ -10,7 +15,7 @@
+ * free all pages in the range. test_page_isolated() can be used for
+ * test it.
+ */
+-extern int
++int
+ start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+ unsigned migratetype);
+
+@@ -18,7 +23,7 @@ start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+ * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
+ * target range is [start_pfn, end_pfn)
+ */
+-extern int
++int
+ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
+ unsigned migratetype);
+
+@@ -30,8 +35,8 @@ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
+ /*
+ * Internal functions. Changes pageblock's migrate type.
+ */
+-extern int set_migratetype_isolate(struct page *page);
+-extern void unset_migratetype_isolate(struct page *page, unsigned migratetype);
++int set_migratetype_isolate(struct page *page);
++void unset_migratetype_isolate(struct page *page, unsigned migratetype);
+
+
+ #endif
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 3922002..d85a959 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -140,9 +140,13 @@ config ARCH_DISCARD_MEMBLOCK
+ config NO_BOOTMEM
+ boolean
+
++config MEMORY_ISOLATION
++ boolean
++
+ # eventually, we can have this option just 'select SPARSEMEM'
+ config MEMORY_HOTPLUG
+ bool "Allow for memory hot-add"
++ select MEMORY_ISOLATION
+ depends on SPARSEMEM || X86_64_ACPI_NUMA
+ depends on HOTPLUG && ARCH_ENABLE_MEMORY_HOTPLUG
+ depends on (IA64 || X86 || PPC_BOOK3S_64 || SUPERH || S390)
+@@ -272,6 +276,7 @@ config MEMORY_FAILURE
+ depends on MMU
+ depends on ARCH_SUPPORTS_MEMORY_FAILURE
+ bool "Enable recovery from hardware memory errors"
++ select MEMORY_ISOLATION
+ help
+ Enables code to recover from some memory failures on systems
+ with MCA recovery. This allows a system to continue running
+diff --git a/mm/Makefile b/mm/Makefile
+index 8aada89..31b0d59 100644
+--- a/mm/Makefile
++++ b/mm/Makefile
+@@ -12,8 +12,9 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
+ maccess.o page_alloc.o page-writeback.o \
+ readahead.o swap.o truncate.o vmscan.o shmem.o \
+ prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
+- page_isolation.o mm_init.o mmu_context.o percpu.o \
++ mm_init.o mmu_context.o percpu.o \
+ compaction.o $(mmu-y)
++
+ obj-y += init-mm.o
+
+ ifdef CONFIG_NO_BOOTMEM
+@@ -50,3 +51,4 @@ obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o
+ obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o
+ obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
+ obj-$(CONFIG_CLEANCACHE) += cleancache.o
++obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 84f82e3..3b17dcd 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -51,7 +51,6 @@
+ #include <linux/page_cgroup.h>
+ #include <linux/debugobjects.h>
+ #include <linux/kmemleak.h>
+-#include <linux/memory.h>
+ #include <linux/compaction.h>
+ #include <trace/events/kmem.h>
+ #include <linux/ftrace_event.h>
+@@ -219,7 +218,7 @@ EXPORT_SYMBOL(nr_online_nodes);
+
+ int page_group_by_mobility_disabled __read_mostly;
+
+-static void set_pageblock_migratetype(struct page *page, int migratetype)
++void set_pageblock_migratetype(struct page *page, int migratetype)
+ {
+
+ if (unlikely(page_group_by_mobility_disabled))
+@@ -954,7 +953,7 @@ static int move_freepages(struct zone *zone,
+ return pages_moved;
+ }
+
+-static int move_freepages_block(struct zone *zone, struct page *page,
++int move_freepages_block(struct zone *zone, struct page *page,
+ int migratetype)
+ {
+ unsigned long start_pfn, end_pfn;
+@@ -5478,8 +5477,7 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
+ * MIGRATE_MOVABLE block might include unmovable pages. It means you can't
+ * expect this function should be exact.
+ */
+-static bool
+-__has_unmovable_pages(struct zone *zone, struct page *page, int count)
++bool has_unmovable_pages(struct zone *zone, struct page *page, int count)
+ {
+ unsigned long pfn, iter, found;
+ int mt;
+@@ -5549,77 +5547,7 @@ bool is_pageblock_removable_nolock(struct page *page)
+ zone->zone_start_pfn + zone->spanned_pages <= pfn)
+ return false;
+
+- return !__has_unmovable_pages(zone, page, 0);
+-}
+-
+-int set_migratetype_isolate(struct page *page)
+-{
+- struct zone *zone;
+- unsigned long flags, pfn;
+- struct memory_isolate_notify arg;
+- int notifier_ret;
+- int ret = -EBUSY;
+-
+- zone = page_zone(page);
+-
+- spin_lock_irqsave(&zone->lock, flags);
+-
+- pfn = page_to_pfn(page);
+- arg.start_pfn = pfn;
+- arg.nr_pages = pageblock_nr_pages;
+- arg.pages_found = 0;
+-
+- /*
+- * It may be possible to isolate a pageblock even if the
+- * migratetype is not MIGRATE_MOVABLE. The memory isolation
+- * notifier chain is used by balloon drivers to return the
+- * number of pages in a range that are held by the balloon
+- * driver to shrink memory. If all the pages are accounted for
+- * by balloons, are free, or on the LRU, isolation can continue.
+- * Later, for example, when memory hotplug notifier runs, these
+- * pages reported as "can be isolated" should be isolated(freed)
+- * by the balloon driver through the memory notifier chain.
+- */
+- notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
+- notifier_ret = notifier_to_errno(notifier_ret);
+- if (notifier_ret)
+- goto out;
+- /*
+- * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
+- * We just check MOVABLE pages.
+- */
+- if (!__has_unmovable_pages(zone, page, arg.pages_found))
+- ret = 0;
+- /*
+- * Unmovable means "not-on-lru" pages. If Unmovable pages are
+- * larger than removable-by-driver pages reported by notifier,
+- * we'll fail.
+- */
+-
+-out:
+- if (!ret) {
+- set_pageblock_migratetype(page, MIGRATE_ISOLATE);
+- move_freepages_block(zone, page, MIGRATE_ISOLATE);
+- }
+-
+- spin_unlock_irqrestore(&zone->lock, flags);
+- if (!ret)
+- drain_all_pages();
+- return ret;
+-}
+-
+-void unset_migratetype_isolate(struct page *page, unsigned migratetype)
+-{
+- struct zone *zone;
+- unsigned long flags;
+- zone = page_zone(page);
+- spin_lock_irqsave(&zone->lock, flags);
+- if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
+- goto out;
+- set_pageblock_migratetype(page, migratetype);
+- move_freepages_block(zone, page, migratetype);
+-out:
+- spin_unlock_irqrestore(&zone->lock, flags);
++ return !has_unmovable_pages(zone, page, 0);
+ }
+
+ #ifdef CONFIG_CMA
+diff --git a/mm/page_isolation.c b/mm/page_isolation.c
+index c9f0477..fb482cf 100644
+--- a/mm/page_isolation.c
++++ b/mm/page_isolation.c
+@@ -5,8 +5,79 @@
+ #include <linux/mm.h>
+ #include <linux/page-isolation.h>
+ #include <linux/pageblock-flags.h>
++#include <linux/memory.h>
+ #include "internal.h"
+
++int set_migratetype_isolate(struct page *page)
++{
++ struct zone *zone;
++ unsigned long flags, pfn;
++ struct memory_isolate_notify arg;
++ int notifier_ret;
++ int ret = -EBUSY;
++
++ zone = page_zone(page);
++
++ spin_lock_irqsave(&zone->lock, flags);
++
++ pfn = page_to_pfn(page);
++ arg.start_pfn = pfn;
++ arg.nr_pages = pageblock_nr_pages;
++ arg.pages_found = 0;
++
++ /*
++ * It may be possible to isolate a pageblock even if the
++ * migratetype is not MIGRATE_MOVABLE. The memory isolation
++ * notifier chain is used by balloon drivers to return the
++ * number of pages in a range that are held by the balloon
++ * driver to shrink memory. If all the pages are accounted for
++ * by balloons, are free, or on the LRU, isolation can continue.
++ * Later, for example, when memory hotplug notifier runs, these
++ * pages reported as "can be isolated" should be isolated(freed)
++ * by the balloon driver through the memory notifier chain.
++ */
++ notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg);
++ notifier_ret = notifier_to_errno(notifier_ret);
++ if (notifier_ret)
++ goto out;
++ /*
++ * FIXME: Now, memory hotplug doesn't call shrink_slab() by itself.
++ * We just check MOVABLE pages.
++ */
++ if (!has_unmovable_pages(zone, page, arg.pages_found))
++ ret = 0;
++
++ /*
++ * immobile means "not-on-lru" paes. If immobile is larger than
++ * removable-by-driver pages reported by notifier, we'll fail.
++ */
++
++out:
++ if (!ret) {
++ set_pageblock_migratetype(page, MIGRATE_ISOLATE);
++ move_freepages_block(zone, page, MIGRATE_ISOLATE);
++ }
++
++ spin_unlock_irqrestore(&zone->lock, flags);
++ if (!ret)
++ drain_all_pages();
++ return ret;
++}
++
++void unset_migratetype_isolate(struct page *page, unsigned migratetype)
++{
++ struct zone *zone;
++ unsigned long flags;
++ zone = page_zone(page);
++ spin_lock_irqsave(&zone->lock, flags);
++ if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
++ goto out;
++ set_pageblock_migratetype(page, migratetype);
++ move_freepages_block(zone, page, migratetype);
++out:
++ spin_unlock_irqrestore(&zone->lock, flags);
++}
++
+ static inline struct page *
+ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
+ {
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/mm-mmzone-migrate_cma-migration-type-added.patch b/patches.dma-mapping/mm-mmzone-migrate_cma-migration-type-added.patch
new file mode 100644
index 0000000000000..7ac2eeb0f80b2
--- /dev/null
+++ b/patches.dma-mapping/mm-mmzone-migrate_cma-migration-type-added.patch
@@ -0,0 +1,327 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:06 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:25 +0900
+Subject: [PATCH v2 08/58] mm: mmzone: MIGRATE_CMA migration type added
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-9-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Michal Nazarewicz <mina86@mina86.com>
+
+The MIGRATE_CMA migration type has two main characteristics:
+(i) only movable pages can be allocated from MIGRATE_CMA
+pageblocks and (ii) the page allocator will never change the
+migration type of MIGRATE_CMA pageblocks.
+
+This guarantees (to some degree) that a page in a MIGRATE_CMA
+pageblock can always be migrated somewhere else (unless there's
+no memory left in the system).
+
+It is designed to be used for allocating big chunks (e.g. 10MiB)
+of physically contiguous memory. Once a driver requests
+contiguous memory, pages from MIGRATE_CMA pageblocks may be
+migrated away to create a contiguous block.
+
+To minimise the number of migrations, the MIGRATE_CMA migration
+type is the last type tried when the page allocator falls back
+to other migration types.
+
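+Characteristic (i) can be illustrated with a small stand-alone sketch
+(not kernel code; the enum and the can_allocate_from() helper are
+invented for the example): a MIGRATE_CMA pageblock may only satisfy
+movable allocations, while other pageblocks carry no such restriction.
+
+#include <stdbool.h>
+#include <stdio.h>
+
+enum migrate_type { UNMOVABLE, RECLAIMABLE, MOVABLE, CMA };
+
+static bool can_allocate_from(enum migrate_type block, enum migrate_type req)
+{
+        if (block == CMA)
+                return req == MOVABLE;  /* CMA blocks serve movable pages only */
+        return true;                    /* other blocks are unrestricted */
+}
+
+int main(void)
+{
+        printf("movable from CMA block:   %d\n", can_allocate_from(CMA, MOVABLE));
+        printf("unmovable from CMA block: %d\n", can_allocate_from(CMA, UNMOVABLE));
+        return 0;
+}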
+Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
+Acked-by: Mel Gorman <mel@csn.ul.ie>
+Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Tested-by: Rob Clark <rob.clark@linaro.org>
+Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
+Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+Tested-by: Robert Nelson <robertcnelson@gmail.com>
+Tested-by: Barry Song <Baohua.Song@csr.com>
+(cherry picked from commit 47118af076f64844b4f423bc2f545b2da9dab50d)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ include/linux/gfp.h | 3 ++
+ include/linux/mmzone.h | 38 +++++++++++++++++++----
+ mm/Kconfig | 2 +-
+ mm/compaction.c | 11 +++++--
+ mm/page_alloc.c | 76 +++++++++++++++++++++++++++++++++++++----------
+ mm/vmstat.c | 3 ++
+ 6 files changed, 106 insertions(+), 27 deletions(-)
+
+diff --git a/include/linux/gfp.h b/include/linux/gfp.h
+index 052a5b6..78d32a7 100644
+--- a/include/linux/gfp.h
++++ b/include/linux/gfp.h
+@@ -397,6 +397,9 @@ static inline bool pm_suspended_storage(void)
+ extern int alloc_contig_range(unsigned long start, unsigned long end);
+ extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
+
++/* CMA stuff */
++extern void init_cma_reserved_pageblock(struct page *page);
++
+ #endif
+
+ #endif /* __LINUX_GFP_H */
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 5f6806b..7d2db87 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -35,13 +35,37 @@
+ */
+ #define PAGE_ALLOC_COSTLY_ORDER 3
+
+-#define MIGRATE_UNMOVABLE 0
+-#define MIGRATE_RECLAIMABLE 1
+-#define MIGRATE_MOVABLE 2
+-#define MIGRATE_PCPTYPES 3 /* the number of types on the pcp lists */
+-#define MIGRATE_RESERVE 3
+-#define MIGRATE_ISOLATE 4 /* can't allocate from here */
+-#define MIGRATE_TYPES 5
++enum {
++ MIGRATE_UNMOVABLE,
++ MIGRATE_RECLAIMABLE,
++ MIGRATE_MOVABLE,
++ MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
++ MIGRATE_RESERVE = MIGRATE_PCPTYPES,
++#ifdef CONFIG_CMA
++ /*
++ * MIGRATE_CMA migration type is designed to mimic the way
++ * ZONE_MOVABLE works. Only movable pages can be allocated
++ * from MIGRATE_CMA pageblocks and page allocator never
++ * implicitly change migration type of MIGRATE_CMA pageblock.
++ *
++ * The way to use it is to change migratetype of a range of
++ * pageblocks to MIGRATE_CMA which can be done by
++ * __free_pageblock_cma() function. What is important though
++ * is that a range of pageblocks must be aligned to
++ * MAX_ORDER_NR_PAGES should biggest page be bigger then
++ * a single pageblock.
++ */
++ MIGRATE_CMA,
++#endif
++ MIGRATE_ISOLATE, /* can't allocate from here */
++ MIGRATE_TYPES
++};
++
++#ifdef CONFIG_CMA
++# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
++#else
++# define is_migrate_cma(migratetype) false
++#endif
+
+ #define for_each_migratetype_order(order, type) \
+ for (order = 0; order < MAX_ORDER; order++) \
+diff --git a/mm/Kconfig b/mm/Kconfig
+index e338407..3922002 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -198,7 +198,7 @@ config COMPACTION
+ config MIGRATION
+ bool "Page migration"
+ def_bool y
+- depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION
++ depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA
+ help
+ Allows the migration of the physical location of pages of processes
+ while the virtual addresses are not changed. This is useful in
+diff --git a/mm/compaction.c b/mm/compaction.c
+index dcb3865..eede981 100644
+--- a/mm/compaction.c
++++ b/mm/compaction.c
+@@ -45,6 +45,11 @@ static void map_pages(struct list_head *list)
+ }
+ }
+
++static inline bool migrate_async_suitable(int migratetype)
++{
++ return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
++}
++
+ /*
+ * Isolate free pages onto a private freelist. Caller must hold zone->lock.
+ * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
+@@ -299,7 +304,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
+ */
+ pageblock_nr = low_pfn >> pageblock_order;
+ if (!cc->sync && last_pageblock_nr != pageblock_nr &&
+- get_pageblock_migratetype(page) != MIGRATE_MOVABLE) {
++ !migrate_async_suitable(get_pageblock_migratetype(page))) {
+ low_pfn += pageblock_nr_pages;
+ low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
+ last_pageblock_nr = pageblock_nr;
+@@ -367,8 +372,8 @@ static bool suitable_migration_target(struct page *page)
+ if (PageBuddy(page) && page_order(page) >= pageblock_order)
+ return true;
+
+- /* If the block is MIGRATE_MOVABLE, allow migration */
+- if (migratetype == MIGRATE_MOVABLE)
++ /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
++ if (migrate_async_suitable(migratetype))
+ return true;
+
+ /* Otherwise skip the block */
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index d6b580c..0869eb1 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -750,6 +750,24 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
+ __free_pages(page, order);
+ }
+
++#ifdef CONFIG_CMA
++/* Free whole pageblock and set it's migration type to MIGRATE_CMA. */
++void __init init_cma_reserved_pageblock(struct page *page)
++{
++ unsigned i = pageblock_nr_pages;
++ struct page *p = page;
++
++ do {
++ __ClearPageReserved(p);
++ set_page_count(p, 0);
++ } while (++p, --i);
++
++ set_page_refcounted(page);
++ set_pageblock_migratetype(page, MIGRATE_CMA);
++ __free_pages(page, pageblock_order);
++ totalram_pages += pageblock_nr_pages;
++}
++#endif
+
+ /*
+ * The order of subdivision here is critical for the IO subsystem.
+@@ -875,10 +893,15 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
+ * This array describes the order lists are fallen back to when
+ * the free lists for the desirable migrate type are depleted
+ */
+-static int fallbacks[MIGRATE_TYPES][3] = {
+- [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
+- [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
+- [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
++static int fallbacks[MIGRATE_TYPES][4] = {
++ [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
++ [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
++#ifdef CONFIG_CMA
++ [MIGRATE_MOVABLE] = { MIGRATE_CMA, MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
++ [MIGRATE_CMA] = { MIGRATE_RESERVE }, /* Never used */
++#else
++ [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
++#endif
+ [MIGRATE_RESERVE] = { MIGRATE_RESERVE }, /* Never used */
+ [MIGRATE_ISOLATE] = { MIGRATE_RESERVE }, /* Never used */
+ };
+@@ -995,11 +1018,18 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
+ * pages to the preferred allocation list. If falling
+ * back for a reclaimable kernel allocation, be more
+ * aggressive about taking ownership of free pages
++ *
++ * On the other hand, never change migration
++ * type of MIGRATE_CMA pageblocks nor move CMA
++ * pages on different free lists. We don't
++ * want unmovable pages to be allocated from
++ * MIGRATE_CMA areas.
+ */
+- if (unlikely(current_order >= (pageblock_order >> 1)) ||
+- start_migratetype == MIGRATE_RECLAIMABLE ||
+- page_group_by_mobility_disabled) {
+- unsigned long pages;
++ if (!is_migrate_cma(migratetype) &&
++ (unlikely(current_order >= pageblock_order / 2) ||
++ start_migratetype == MIGRATE_RECLAIMABLE ||
++ page_group_by_mobility_disabled)) {
++ int pages;
+ pages = move_freepages_block(zone, page,
+ start_migratetype);
+
+@@ -1017,11 +1047,14 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
+ rmv_page_order(page);
+
+ /* Take ownership for orders >= pageblock_order */
+- if (current_order >= pageblock_order)
++ if (current_order >= pageblock_order &&
++ !is_migrate_cma(migratetype))
+ change_pageblock_range(page, current_order,
+ start_migratetype);
+
+- expand(zone, page, order, current_order, area, migratetype);
++ expand(zone, page, order, current_order, area,
++ is_migrate_cma(migratetype)
++ ? migratetype : start_migratetype);
+
+ trace_mm_page_alloc_extfrag(page, order, current_order,
+ start_migratetype, migratetype);
+@@ -1072,7 +1105,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
+ unsigned long count, struct list_head *list,
+ int migratetype, int cold)
+ {
+- int i;
++ int mt = migratetype, i;
+
+ spin_lock(&zone->lock);
+ for (i = 0; i < count; ++i) {
+@@ -1093,7 +1126,12 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
+ list_add(&page->lru, list);
+ else
+ list_add_tail(&page->lru, list);
+- set_page_private(page, migratetype);
++ if (IS_ENABLED(CONFIG_CMA)) {
++ mt = get_pageblock_migratetype(page);
++ if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
++ mt = migratetype;
++ }
++ set_page_private(page, mt);
+ list = &page->lru;
+ }
+ __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
+@@ -1373,8 +1411,12 @@ int split_free_page(struct page *page)
+
+ if (order >= pageblock_order - 1) {
+ struct page *endpage = page + (1 << order) - 1;
+- for (; page < endpage; page += pageblock_nr_pages)
+- set_pageblock_migratetype(page, MIGRATE_MOVABLE);
++ for (; page < endpage; page += pageblock_nr_pages) {
++ int mt = get_pageblock_migratetype(page);
++ if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
++ set_pageblock_migratetype(page,
++ MIGRATE_MOVABLE);
++ }
+ }
+
+ return 1 << order;
+@@ -5414,14 +5456,16 @@ static int
+ __count_immobile_pages(struct zone *zone, struct page *page, int count)
+ {
+ unsigned long pfn, iter, found;
++ int mt;
++
+ /*
+ * For avoiding noise data, lru_add_drain_all() should be called
+ * If ZONE_MOVABLE, the zone never contains immobile pages
+ */
+ if (zone_idx(zone) == ZONE_MOVABLE)
+ return true;
+-
+- if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
++ mt = get_pageblock_migratetype(page);
++ if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
+ return true;
+
+ pfn = page_to_pfn(page);
+diff --git a/mm/vmstat.c b/mm/vmstat.c
+index 7db1b9b..0dad31dc 100644
+--- a/mm/vmstat.c
++++ b/mm/vmstat.c
+@@ -613,6 +613,9 @@ static char * const migratetype_names[MIGRATE_TYPES] = {
+ "Reclaimable",
+ "Movable",
+ "Reserve",
++#ifdef CONFIG_CMA
++ "CMA",
++#endif
+ "Isolate",
+ };
+
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/mm-page_alloc-change-fallbacks-array-handling.patch b/patches.dma-mapping/mm-page_alloc-change-fallbacks-array-handling.patch
new file mode 100644
index 0000000000000..d5f968dfa4b05
--- /dev/null
+++ b/patches.dma-mapping/mm-page_alloc-change-fallbacks-array-handling.patch
@@ -0,0 +1,71 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:02 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:24 +0900
+Subject: [PATCH v2 07/58] mm: page_alloc: change fallbacks array handling
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-8-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Michal Nazarewicz <mina86@mina86.com>
+
+This commit adds the previously missing row for the MIGRATE_ISOLATE type
+to the fallbacks array. It also changes the array traversal logic a
+little, making MIGRATE_RESERVE an end marker. The latter change removes
+the implicit MIGRATE_UNMOVABLE from the end of each row, which was read
+by the __rmqueue_fallback() function.
+
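+The traversal change can be sketched stand-alone (this is not the
+kernel code; the enum and the row below merely mirror the
+MIGRATE_MOVABLE entry of the fallbacks array in this patch): walk the
+row until the MIGRATE_RESERVE end marker instead of over a fixed
+number of columns.
+
+#include <stdio.h>
+
+enum { UNMOVABLE, RECLAIMABLE, MOVABLE, RESERVE };
+
+static const int movable_row[] = { RECLAIMABLE, UNMOVABLE, RESERVE };
+
+int main(void)
+{
+        for (int i = 0; ; i++) {
+                if (movable_row[i] == RESERVE) {
+                        puts("hit end marker, MIGRATE_RESERVE handled later");
+                        break;
+                }
+                printf("try fallback type %d\n", movable_row[i]);
+        }
+        return 0;
+}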
+Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Mel Gorman <mel@csn.ul.ie>
+Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Tested-by: Rob Clark <rob.clark@linaro.org>
+Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
+Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+Tested-by: Robert Nelson <robertcnelson@gmail.com>
+Tested-by: Barry Song <Baohua.Song@csr.com>
+(cherry picked from commit 6d4a49160de2c684fb59fa627bce80e200224331)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ mm/page_alloc.c | 9 +++++----
+ 1 files changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 2c38a30..d6b580c 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -875,11 +875,12 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
+ * This array describes the order lists are fallen back to when
+ * the free lists for the desirable migrate type are depleted
+ */
+-static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
++static int fallbacks[MIGRATE_TYPES][3] = {
+ [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
+ [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
+ [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+- [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */
++ [MIGRATE_RESERVE] = { MIGRATE_RESERVE }, /* Never used */
++ [MIGRATE_ISOLATE] = { MIGRATE_RESERVE }, /* Never used */
+ };
+
+ /*
+@@ -974,12 +975,12 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
+ /* Find the largest possible block of pages in the other list */
+ for (current_order = MAX_ORDER-1; current_order >= order;
+ --current_order) {
+- for (i = 0; i < MIGRATE_TYPES - 1; i++) {
++ for (i = 0;; i++) {
+ migratetype = fallbacks[start_migratetype][i];
+
+ /* MIGRATE_RESERVE handled later if necessary */
+ if (migratetype == MIGRATE_RESERVE)
+- continue;
++ break;
+
+ area = &(zone->free_area[current_order]);
+ if (list_empty(&area->free_list[migratetype]))
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/mm-page_alloc-introduce-alloc_contig_range.patch b/patches.dma-mapping/mm-page_alloc-introduce-alloc_contig_range.patch
new file mode 100644
index 0000000000000..e2e7c2063f139
--- /dev/null
+++ b/patches.dma-mapping/mm-page_alloc-introduce-alloc_contig_range.patch
@@ -0,0 +1,261 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:00 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:23 +0900
+Subject: [PATCH v2 06/58] mm: page_alloc: introduce alloc_contig_range()
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-7-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Michal Nazarewicz <mina86@mina86.com>
+
+This commit adds the alloc_contig_range() function, which tries to
+allocate a given range of pages. It tries to migrate all already
+allocated pages that fall within the range, thus freeing them. Once
+all pages in the range are free, they are removed from the buddy
+system and thus allocated for the caller to use.
+
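+A loose user-space model of the bounded-retry idea used by
+__alloc_contig_migrate_range() below (the helpers and numbers are
+invented, and the kernel's bookkeeping differs in detail): keep
+migrating pages out of the range and give up with -EBUSY once five
+consecutive passes make no progress.
+
+#include <stdio.h>
+
+#define EBUSY_ERR (-16)
+
+/* Pretend migration step: returns how many pages were moved this pass. */
+static int migrate_pass(int *remaining, int stuck)
+{
+        if (*remaining > stuck) {
+                *remaining -= 1;
+                return 1;
+        }
+        return 0;       /* nothing could be migrated this pass */
+}
+
+static int empty_range(int pages, int stuck)
+{
+        int tries = 0;
+
+        while (pages > 0) {
+                if (migrate_pass(&pages, stuck) > 0)
+                        tries = 0;
+                else if (++tries == 5)
+                        return EBUSY_ERR;
+        }
+        return 0;
+}
+
+int main(void)
+{
+        printf("no stuck pages:  %d\n", empty_range(8, 0));     /* 0 */
+        printf("two stuck pages: %d\n", empty_range(8, 2));     /* -16 */
+        return 0;
+}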
+Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Mel Gorman <mel@csn.ul.ie>
+Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Tested-by: Rob Clark <rob.clark@linaro.org>
+Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
+Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+Tested-by: Robert Nelson <robertcnelson@gmail.com>
+Tested-by: Barry Song <Baohua.Song@csr.com>
+(cherry picked from commit 041d3a8cdc18dc375a128d90bbb753949a81b1fb)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ include/linux/gfp.h | 8 ++
+ mm/page_alloc.c | 188 +++++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 196 insertions(+), 0 deletions(-)
+
+diff --git a/include/linux/gfp.h b/include/linux/gfp.h
+index 581e74b..052a5b6 100644
+--- a/include/linux/gfp.h
++++ b/include/linux/gfp.h
+@@ -391,4 +391,12 @@ static inline bool pm_suspended_storage(void)
+ }
+ #endif /* CONFIG_PM_SLEEP */
+
++#ifdef CONFIG_CMA
++
++/* The below functions must be run on a range from a single zone. */
++extern int alloc_contig_range(unsigned long start, unsigned long end);
++extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
++
++#endif
++
+ #endif /* __LINUX_GFP_H */
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 6fb46c1..2c38a30 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -57,6 +57,7 @@
+ #include <linux/ftrace_event.h>
+ #include <linux/memcontrol.h>
+ #include <linux/prefetch.h>
++#include <linux/migrate.h>
+ #include <linux/page-debug-flags.h>
+
+ #include <asm/tlbflush.h>
+@@ -5550,6 +5551,193 @@ out:
+ spin_unlock_irqrestore(&zone->lock, flags);
+ }
+
++#ifdef CONFIG_CMA
++
++static unsigned long pfn_max_align_down(unsigned long pfn)
++{
++ return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
++ pageblock_nr_pages) - 1);
++}
++
++static unsigned long pfn_max_align_up(unsigned long pfn)
++{
++ return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
++ pageblock_nr_pages));
++}
++
++static struct page *
++__alloc_contig_migrate_alloc(struct page *page, unsigned long private,
++ int **resultp)
++{
++ return alloc_page(GFP_HIGHUSER_MOVABLE);
++}
++
++/* [start, end) must belong to a single zone. */
++static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
++{
++ /* This function is based on compact_zone() from compaction.c. */
++
++ unsigned long pfn = start;
++ unsigned int tries = 0;
++ int ret = 0;
++
++ struct compact_control cc = {
++ .nr_migratepages = 0,
++ .order = -1,
++ .zone = page_zone(pfn_to_page(start)),
++ .sync = true,
++ };
++ INIT_LIST_HEAD(&cc.migratepages);
++
++ migrate_prep_local();
++
++ while (pfn < end || !list_empty(&cc.migratepages)) {
++ if (fatal_signal_pending(current)) {
++ ret = -EINTR;
++ break;
++ }
++
++ if (list_empty(&cc.migratepages)) {
++ cc.nr_migratepages = 0;
++ pfn = isolate_migratepages_range(cc.zone, &cc,
++ pfn, end);
++ if (!pfn) {
++ ret = -EINTR;
++ break;
++ }
++ tries = 0;
++ } else if (++tries == 5) {
++ ret = ret < 0 ? ret : -EBUSY;
++ break;
++ }
++
++ ret = migrate_pages(&cc.migratepages,
++ __alloc_contig_migrate_alloc,
++ 0, false, true);
++ }
++
++ putback_lru_pages(&cc.migratepages);
++ return ret > 0 ? 0 : ret;
++}
++
++/**
++ * alloc_contig_range() -- tries to allocate given range of pages
++ * @start: start PFN to allocate
++ * @end: one-past-the-last PFN to allocate
++ *
++ * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
++ * aligned, however it's the caller's responsibility to guarantee that
++ * we are the only thread that changes migrate type of pageblocks the
++ * pages fall in.
++ *
++ * The PFN range must belong to a single zone.
++ *
++ * Returns zero on success or negative error code. On success all
++ * pages which PFN is in [start, end) are allocated for the caller and
++ * need to be freed with free_contig_range().
++ */
++int alloc_contig_range(unsigned long start, unsigned long end)
++{
++ struct zone *zone = page_zone(pfn_to_page(start));
++ unsigned long outer_start, outer_end;
++ int ret = 0, order;
++
++ /*
++ * What we do here is we mark all pageblocks in range as
++ * MIGRATE_ISOLATE. Because pageblock and max order pages may
++ * have different sizes, and due to the way page allocator
++ * work, we align the range to biggest of the two pages so
++ * that page allocator won't try to merge buddies from
++ * different pageblocks and change MIGRATE_ISOLATE to some
++ * other migration type.
++ *
++ * Once the pageblocks are marked as MIGRATE_ISOLATE, we
++ * migrate the pages from an unaligned range (ie. pages that
++ * we are interested in). This will put all the pages in
++ * range back to page allocator as MIGRATE_ISOLATE.
++ *
++ * When this is done, we take the pages in range from page
++ * allocator removing them from the buddy system. This way
++ * page allocator will never consider using them.
++ *
++ * This lets us mark the pageblocks back as
++ * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
++ * aligned range but not in the unaligned, original range are
++ * put back to page allocator so that buddy can use them.
++ */
++
++ ret = start_isolate_page_range(pfn_max_align_down(start),
++ pfn_max_align_up(end));
++ if (ret)
++ goto done;
++
++ ret = __alloc_contig_migrate_range(start, end);
++ if (ret)
++ goto done;
++
++ /*
++ * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
++ * aligned blocks that are marked as MIGRATE_ISOLATE. What's
++ * more, all pages in [start, end) are free in page allocator.
++ * What we are going to do is to allocate all pages from
++ * [start, end) (that is remove them from page allocator).
++ *
++ * The only problem is that pages at the beginning and at the
++ * end of interesting range may be not aligned with pages that
++ * page allocator holds, ie. they can be part of higher order
++ * pages. Because of this, we reserve the bigger range and
++ * once this is done free the pages we are not interested in.
++ *
++ * We don't have to hold zone->lock here because the pages are
++ * isolated thus they won't get removed from buddy.
++ */
++
++ lru_add_drain_all();
++ drain_all_pages();
++
++ order = 0;
++ outer_start = start;
++ while (!PageBuddy(pfn_to_page(outer_start))) {
++ if (++order >= MAX_ORDER) {
++ ret = -EBUSY;
++ goto done;
++ }
++ outer_start &= ~0UL << order;
++ }
++
++ /* Make sure the range is really isolated. */
++ if (test_pages_isolated(outer_start, end)) {
++ pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
++ outer_start, end);
++ ret = -EBUSY;
++ goto done;
++ }
++
++ outer_end = isolate_freepages_range(outer_start, end);
++ if (!outer_end) {
++ ret = -EBUSY;
++ goto done;
++ }
++
++ /* Free head and tail (if any) */
++ if (start != outer_start)
++ free_contig_range(outer_start, start - outer_start);
++ if (end != outer_end)
++ free_contig_range(end, outer_end - end);
++
++done:
++ undo_isolate_page_range(pfn_max_align_down(start),
++ pfn_max_align_up(end));
++ return ret;
++}
++
++void free_contig_range(unsigned long pfn, unsigned nr_pages)
++{
++ for (; nr_pages--; ++pfn)
++ __free_page(pfn_to_page(pfn));
++}
++#endif
++
+ #ifdef CONFIG_MEMORY_HOTREMOVE
+ /*
+ * All pages in the range must be isolated before calling this.
+--
+1.7.5.4
+
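
An illustrative aside, not part of the patch series: a hypothetical in-kernel
caller of the interface introduced above might look like the sketch below. It
assumes CONFIG_CMA=y (the declarations are guarded by it) and uses the
two-argument signature added here; a later patch in this series extends it
with a migratetype parameter. The wrapper names are invented.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical wrapper: claim nr_pages physically contiguous pages starting
 * at start_pfn. The whole [start, end) range must lie within a single zone. */
static struct page *claim_contig_block(unsigned long start_pfn,
				       unsigned long nr_pages)
{
	int ret = alloc_contig_range(start_pfn, start_pfn + nr_pages);

	if (ret)	/* -EBUSY or -EINTR if isolation/migration failed */
		return NULL;
	return pfn_to_page(start_pfn);
}

/* Hypothetical wrapper: hand the pages back to the buddy allocator. */
static void release_contig_block(unsigned long start_pfn,
				 unsigned long nr_pages)
{
	free_contig_range(start_pfn, nr_pages);
}
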
diff --git a/patches.dma-mapping/mm-page_alloc-remove-trailing-whitespace.patch b/patches.dma-mapping/mm-page_alloc-remove-trailing-whitespace.patch
new file mode 100644
index 0000000000000..c23dd26edf2be
--- /dev/null
+++ b/patches.dma-mapping/mm-page_alloc-remove-trailing-whitespace.patch
@@ -0,0 +1,73 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:51:50 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:18 +0900
+Subject: [PATCH v2 01/58] mm: page_alloc: remove trailing whitespace
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-2-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Michal Nazarewicz <mina86@mina86.com>
+
+Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Mel Gorman <mel@csn.ul.ie>
+(cherry picked from commit 5f63b720b62925ef3c6a85473dcd547b0fd90616)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ mm/page_alloc.c | 14 +++++++-------
+ 1 files changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 918330f..6fb46c1 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -513,10 +513,10 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
+ * free pages of length of (1 << order) and marked with _mapcount -2. Page's
+ * order is recorded in page_private(page) field.
+ * So when we are allocating or freeing one, we can derive the state of the
+- * other. That is, if we allocate a small block, and both were
+- * free, the remainder of the region must be split into blocks.
++ * other. That is, if we allocate a small block, and both were
++ * free, the remainder of the region must be split into blocks.
+ * If a block is freed, and its buddy is also free, then this
+- * triggers coalescing into a block of larger size.
++ * triggers coalescing into a block of larger size.
+ *
+ * -- wli
+ */
+@@ -1061,17 +1061,17 @@ retry_reserve:
+ return page;
+ }
+
+-/*
++/*
+ * Obtain a specified number of elements from the buddy allocator, all under
+ * a single hold of the lock, for efficiency. Add them to the supplied list.
+ * Returns the number of new pages which were placed at *list.
+ */
+-static int rmqueue_bulk(struct zone *zone, unsigned int order,
++static int rmqueue_bulk(struct zone *zone, unsigned int order,
+ unsigned long count, struct list_head *list,
+ int migratetype, int cold)
+ {
+ int i;
+-
++
+ spin_lock(&zone->lock);
+ for (i = 0; i < count; ++i) {
+ struct page *page = __rmqueue(zone, order, migratetype);
+@@ -4301,7 +4301,7 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
+ init_waitqueue_head(&pgdat->kswapd_wait);
+ pgdat->kswapd_max_order = 0;
+ pgdat_page_cgroup_init(pgdat);
+-
++
+ for (j = 0; j < MAX_NR_ZONES; j++) {
+ struct zone *zone = pgdat->node_zones + j;
+ unsigned long size, realsize, memmap_pages;
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/mm-page_isolation-migrate_cma-isolation-functions-added.patch b/patches.dma-mapping/mm-page_isolation-migrate_cma-isolation-functions-added.patch
new file mode 100644
index 0000000000000..42952170ac581
--- /dev/null
+++ b/patches.dma-mapping/mm-page_isolation-migrate_cma-isolation-functions-added.patch
@@ -0,0 +1,256 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:08 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:26 +0900
+Subject: [PATCH v2 09/58] mm: page_isolation: MIGRATE_CMA isolation functions added
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-10-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Michal Nazarewicz <mina86@mina86.com>
+
+This commit changes the various functions that switch pages and
+pageblocks between the MIGRATE_ISOLATE and MIGRATE_MOVABLE
+migrate types so that they can also work with the
+MIGRATE_CMA migrate type.
+
+Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Tested-by: Rob Clark <rob.clark@linaro.org>
+Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
+Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+Tested-by: Robert Nelson <robertcnelson@gmail.com>
+Tested-by: Barry Song <Baohua.Song@csr.com>
+(cherry picked from commit 0815f3d81d76dfbf2abcfd93a85ff0a6008fe4c0)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ include/linux/gfp.h | 3 ++-
+ include/linux/page-isolation.h | 18 +++++++++---------
+ mm/memory-failure.c | 2 +-
+ mm/memory_hotplug.c | 6 +++---
+ mm/page_alloc.c | 17 +++++++++++------
+ mm/page_isolation.c | 15 ++++++++-------
+ 6 files changed, 34 insertions(+), 27 deletions(-)
+
+--- a/include/linux/gfp.h
++++ b/include/linux/gfp.h
+@@ -394,7 +394,8 @@ static inline bool pm_suspended_storage(
+ #ifdef CONFIG_CMA
+
+ /* The below functions must be run on a range from a single zone. */
+-extern int alloc_contig_range(unsigned long start, unsigned long end);
++extern int alloc_contig_range(unsigned long start, unsigned long end,
++ unsigned migratetype);
+ extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
+
+ /* CMA stuff */
+--- a/include/linux/page-isolation.h
++++ b/include/linux/page-isolation.h
+@@ -3,7 +3,7 @@
+
+ /*
+ * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
+- * If specified range includes migrate types other than MOVABLE,
++ * If specified range includes migrate types other than MOVABLE or CMA,
+ * this will fail with -EBUSY.
+ *
+ * For isolating all pages in the range finally, the caller have to
+@@ -11,27 +11,27 @@
+ * test it.
+ */
+ extern int
+-start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
++start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
++ unsigned migratetype);
+
+ /*
+ * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
+ * target range is [start_pfn, end_pfn)
+ */
+ extern int
+-undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
++undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
++ unsigned migratetype);
+
+ /*
+- * test all pages in [start_pfn, end_pfn)are isolated or not.
++ * Test all pages in [start_pfn, end_pfn) are isolated or not.
+ */
+-extern int
+-test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
++int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
+
+ /*
+- * Internal funcs.Changes pageblock's migrate type.
+- * Please use make_pagetype_isolated()/make_pagetype_movable().
++ * Internal functions. Changes pageblock's migrate type.
+ */
+ extern int set_migratetype_isolate(struct page *page);
+-extern void unset_migratetype_isolate(struct page *page);
++extern void unset_migratetype_isolate(struct page *page, unsigned migratetype);
+
+
+ #endif
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -1406,7 +1406,7 @@ static int get_any_page(struct page *p,
+ /* Not a free page */
+ ret = 1;
+ }
+- unset_migratetype_isolate(p);
++ unset_migratetype_isolate(p, MIGRATE_MOVABLE);
+ unlock_memory_hotplug();
+ return ret;
+ }
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -895,7 +895,7 @@ static int __ref offline_pages(unsigned
+ nr_pages = end_pfn - start_pfn;
+
+ /* set above range as isolated */
+- ret = start_isolate_page_range(start_pfn, end_pfn);
++ ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+ if (ret)
+ goto out;
+
+@@ -960,7 +960,7 @@ repeat:
+ We cannot do rollback at this point. */
+ offline_isolated_pages(start_pfn, end_pfn);
+ /* reset pagetype flags and makes migrate type to be MOVABLE */
+- undo_isolate_page_range(start_pfn, end_pfn);
++ undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+ /* removal success */
+ zone->present_pages -= offlined_pages;
+ zone->zone_pgdat->node_present_pages -= offlined_pages;
+@@ -985,7 +985,7 @@ failed_removal:
+ start_pfn, end_pfn);
+ memory_notify(MEM_CANCEL_OFFLINE, &arg);
+ /* pushback to free area */
+- undo_isolate_page_range(start_pfn, end_pfn);
++ undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
+
+ out:
+ unlock_memory_hotplug();
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5582,7 +5582,7 @@ out:
+ return ret;
+ }
+
+-void unset_migratetype_isolate(struct page *page)
++void unset_migratetype_isolate(struct page *page, unsigned migratetype)
+ {
+ struct zone *zone;
+ unsigned long flags;
+@@ -5590,8 +5590,8 @@ void unset_migratetype_isolate(struct pa
+ spin_lock_irqsave(&zone->lock, flags);
+ if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
+ goto out;
+- set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+- move_freepages_block(zone, page, MIGRATE_MOVABLE);
++ set_pageblock_migratetype(page, migratetype);
++ move_freepages_block(zone, page, migratetype);
+ out:
+ spin_unlock_irqrestore(&zone->lock, flags);
+ }
+@@ -5669,6 +5669,10 @@ static int __alloc_contig_migrate_range(
+ * alloc_contig_range() -- tries to allocate given range of pages
+ * @start: start PFN to allocate
+ * @end: one-past-the-last PFN to allocate
++ * @migratetype: migratetype of the underlaying pageblocks (either
++ * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
++ * in range must have the same migratetype and it must
++ * be either of the two.
+ *
+ * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
+ * aligned, however it's the caller's responsibility to guarantee that
+@@ -5681,7 +5685,8 @@ static int __alloc_contig_migrate_range(
+ * pages which PFN is in [start, end) are allocated for the caller and
+ * need to be freed with free_contig_range().
+ */
+-int alloc_contig_range(unsigned long start, unsigned long end)
++int alloc_contig_range(unsigned long start, unsigned long end,
++ unsigned migratetype)
+ {
+ struct zone *zone = page_zone(pfn_to_page(start));
+ unsigned long outer_start, outer_end;
+@@ -5712,7 +5717,7 @@ int alloc_contig_range(unsigned long sta
+ */
+
+ ret = start_isolate_page_range(pfn_max_align_down(start),
+- pfn_max_align_up(end));
++ pfn_max_align_up(end), migratetype);
+ if (ret)
+ goto done;
+
+@@ -5772,7 +5777,7 @@ int alloc_contig_range(unsigned long sta
+
+ done:
+ undo_isolate_page_range(pfn_max_align_down(start),
+- pfn_max_align_up(end));
++ pfn_max_align_up(end), migratetype);
+ return ret;
+ }
+
+--- a/mm/page_isolation.c
++++ b/mm/page_isolation.c
+@@ -24,6 +24,7 @@ __first_valid_page(unsigned long pfn, un
+ * to be MIGRATE_ISOLATE.
+ * @start_pfn: The lower PFN of the range to be isolated.
+ * @end_pfn: The upper PFN of the range to be isolated.
++ * @migratetype: migrate type to set in error recovery.
+ *
+ * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
+ * the range will never be allocated. Any free pages and pages freed in the
+@@ -32,8 +33,8 @@ __first_valid_page(unsigned long pfn, un
+ * start_pfn/end_pfn must be aligned to pageblock_order.
+ * Returns 0 on success and -EBUSY if any part of range cannot be isolated.
+ */
+-int
+-start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
++int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
++ unsigned migratetype)
+ {
+ unsigned long pfn;
+ unsigned long undo_pfn;
+@@ -56,7 +57,7 @@ undo:
+ for (pfn = start_pfn;
+ pfn < undo_pfn;
+ pfn += pageblock_nr_pages)
+- unset_migratetype_isolate(pfn_to_page(pfn));
++ unset_migratetype_isolate(pfn_to_page(pfn), migratetype);
+
+ return -EBUSY;
+ }
+@@ -64,8 +65,8 @@ undo:
+ /*
+ * Make isolated pages available again.
+ */
+-int
+-undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
++int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
++ unsigned migratetype)
+ {
+ unsigned long pfn;
+ struct page *page;
+@@ -77,7 +78,7 @@ undo_isolate_page_range(unsigned long st
+ page = __first_valid_page(pfn, pageblock_nr_pages);
+ if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
+ continue;
+- unset_migratetype_isolate(page);
++ unset_migratetype_isolate(page, migratetype);
+ }
+ return 0;
+ }
+@@ -86,7 +87,7 @@ undo_isolate_page_range(unsigned long st
+ * all pages in [start_pfn...end_pfn) must be in the same zone.
+ * zone->lock must be held before call this.
+ *
+- * Returns 1 if all pages in the range is isolated.
++ * Returns 1 if all pages in the range are isolated.
+ */
+ static int
+ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
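
Again as an aside, not part of the patch series: with the migratetype
parameter added above, an isolation cycle over a CMA-backed, pageblock-aligned
PFN range would be driven roughly as in the sketch below. The wrapper is
invented; MIGRATE_CMA itself comes from an earlier patch in this series.

#include <linux/page-isolation.h>
#include <linux/mmzone.h>

/* Hypothetical helper: temporarily isolate a CMA pageblock range. */
static int isolate_cma_block(unsigned long start_pfn, unsigned long end_pfn)
{
	int ret;

	/* Mark the pageblocks MIGRATE_ISOLATE; MIGRATE_CMA is what gets
	 * restored if part of the range cannot be isolated. */
	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_CMA);
	if (ret)
		return ret;	/* some pageblock was neither CMA nor MOVABLE */

	/* ... migrate away and claim the pages in between ... */

	/* Put the pageblocks back to MIGRATE_CMA once done. */
	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_CMA);
	return 0;
}
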
diff --git a/patches.dma-mapping/mm-serialize-access-to-min_free_kbytes.patch b/patches.dma-mapping/mm-serialize-access-to-min_free_kbytes.patch
new file mode 100644
index 0000000000000..b5e72594a24cf
--- /dev/null
+++ b/patches.dma-mapping/mm-serialize-access-to-min_free_kbytes.patch
@@ -0,0 +1,73 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:10 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:27 +0900
+Subject: [PATCH v2 10/58] mm: Serialize access to min_free_kbytes
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-11-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Mel Gorman <mgorman@suse.de>
+
+There is a race between the min_free_kbytes sysctl, memory hotplug
+and transparent hugepage support enablement. Memory hotplug uses a
+zonelists_mutex to avoid a race when building zonelists. Reuse it to
+serialise watermark updates.
+
+[a.p.zijlstra@chello.nl: Older patch fixed the race with spinlock]
+Signed-off-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
+Tested-by: Barry Song <Baohua.Song@csr.com>
+(cherry picked from commit cfd3da1e49bb95c355c01c0f502d657deb3d34a4)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ mm/page_alloc.c | 23 +++++++++++++++--------
+ 1 files changed, 15 insertions(+), 8 deletions(-)
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 116c087..8be37bc 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5020,14 +5020,7 @@ static void setup_per_zone_lowmem_reserve(void)
+ calculate_totalreserve_pages();
+ }
+
+-/**
+- * setup_per_zone_wmarks - called when min_free_kbytes changes
+- * or when memory is hot-{added|removed}
+- *
+- * Ensures that the watermark[min,low,high] values for each zone are set
+- * correctly with respect to min_free_kbytes.
+- */
+-void setup_per_zone_wmarks(void)
++static void __setup_per_zone_wmarks(void)
+ {
+ unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
+ unsigned long lowmem_pages = 0;
+@@ -5082,6 +5075,20 @@ void setup_per_zone_wmarks(void)
+ calculate_totalreserve_pages();
+ }
+
++/**
++ * setup_per_zone_wmarks - called when min_free_kbytes changes
++ * or when memory is hot-{added|removed}
++ *
++ * Ensures that the watermark[min,low,high] values for each zone are set
++ * correctly with respect to min_free_kbytes.
++ */
++void setup_per_zone_wmarks(void)
++{
++ mutex_lock(&zonelists_mutex);
++ __setup_per_zone_wmarks();
++ mutex_unlock(&zonelists_mutex);
++}
++
+ /*
+ * The inactive anon list should be small enough that the VM never has to
+ * do too much work, but large enough that each inactive page has a chance
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/mm-trigger-page-reclaim-in-alloc_contig_range-to-stabilise-watermarks.patch b/patches.dma-mapping/mm-trigger-page-reclaim-in-alloc_contig_range-to-stabilise-watermarks.patch
new file mode 100644
index 0000000000000..40f8128f342cc
--- /dev/null
+++ b/patches.dma-mapping/mm-trigger-page-reclaim-in-alloc_contig_range-to-stabilise-watermarks.patch
@@ -0,0 +1,151 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:15 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:29 +0900
+Subject: [PATCH v2 12/58] mm: trigger page reclaim in alloc_contig_range() to stabilise watermarks
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-13-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+alloc_contig_range() performs memory allocation, so it should also keep
+the memory watermarks at the correct level. This commit adds
+a call to *_slowpath-style reclaim to grab enough pages to make sure that
+the final collection of contiguous pages from the freelists will not starve
+the system.
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
+CC: Michal Nazarewicz <mina86@mina86.com>
+Tested-by: Rob Clark <rob.clark@linaro.org>
+Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
+Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
+Tested-by: Robert Nelson <robertcnelson@gmail.com>
+Tested-by: Barry Song <Baohua.Song@csr.com>
+(cherry picked from commit 49f223a9cd96c7293d7258ff88c2bdf83065f69c)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ include/linux/mmzone.h | 9 +++++++
+ mm/page_alloc.c | 60 ++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 69 insertions(+), 0 deletions(-)
+
+diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
+index 7d2db87..8d6da7d 100644
+--- a/include/linux/mmzone.h
++++ b/include/linux/mmzone.h
+@@ -63,8 +63,10 @@ enum {
+
+ #ifdef CONFIG_CMA
+ # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
++# define cma_wmark_pages(zone) zone->min_cma_pages
+ #else
+ # define is_migrate_cma(migratetype) false
++# define cma_wmark_pages(zone) 0
+ #endif
+
+ #define for_each_migratetype_order(order, type) \
+@@ -371,6 +373,13 @@ struct zone {
+ /* see spanned/present_pages for more description */
+ seqlock_t span_seqlock;
+ #endif
++#ifdef CONFIG_CMA
++ /*
++ * CMA needs to increase watermark levels during the allocation
++ * process to make sure that the system is not starved.
++ */
++ unsigned long min_cma_pages;
++#endif
+ struct free_area free_area[MAX_ORDER];
+
+ #ifndef CONFIG_SPARSEMEM
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 4615531..22348ae 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -5079,6 +5079,11 @@ static void __setup_per_zone_wmarks(void)
+
+ zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
+ zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
++
++ zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
++ zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
++ zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
++
+ setup_zone_migrate_reserve(zone);
+ spin_unlock_irqrestore(&zone->lock, flags);
+ }
+@@ -5684,6 +5689,54 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
+ return ret > 0 ? 0 : ret;
+ }
+
++/*
++ * Update zone's cma pages counter used for watermark level calculation.
++ */
++static inline void __update_cma_watermarks(struct zone *zone, int count)
++{
++ unsigned long flags;
++ spin_lock_irqsave(&zone->lock, flags);
++ zone->min_cma_pages += count;
++ spin_unlock_irqrestore(&zone->lock, flags);
++ setup_per_zone_wmarks();
++}
++
++/*
++ * Trigger memory pressure bump to reclaim some pages in order to be able to
++ * allocate 'count' pages in single page units. Does similar work as
++ *__alloc_pages_slowpath() function.
++ */
++static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
++{
++ enum zone_type high_zoneidx = gfp_zone(gfp_mask);
++ struct zonelist *zonelist = node_zonelist(0, gfp_mask);
++ int did_some_progress = 0;
++ int order = 1;
++
++ /*
++ * Increase level of watermarks to force kswapd do his job
++ * to stabilise at new watermark level.
++ */
++ __update_cma_watermarks(zone, count);
++
++ /* Obey watermarks as if the page was being allocated */
++ while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
++ wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
++
++ did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
++ NULL);
++ if (!did_some_progress) {
++ /* Exhausted what can be done so it's blamo time */
++ out_of_memory(zonelist, gfp_mask, order, NULL, false);
++ }
++ }
++
++ /* Restore original watermark levels. */
++ __update_cma_watermarks(zone, -count);
++
++ return count;
++}
++
+ /**
+ * alloc_contig_range() -- tries to allocate given range of pages
+ * @start: start PFN to allocate
+@@ -5782,6 +5835,13 @@ int alloc_contig_range(unsigned long start, unsigned long end,
+ goto done;
+ }
+
++ /*
++ * Reclaim enough pages to make sure that contiguous allocation
++ * will not starve the system.
++ */
++ __reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);
++
++ /* Grab isolated pages from freelists. */
+ outer_end = isolate_freepages_range(outer_start, end);
+ if (!outer_end) {
+ ret = -EBUSY;
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/mm-vmalloc-use-const-void-for-caller-argument.patch b/patches.dma-mapping/mm-vmalloc-use-const-void-for-caller-argument.patch
new file mode 100644
index 0000000000000..3944fe431e6a4
--- /dev/null
+++ b/patches.dma-mapping/mm-vmalloc-use-const-void-for-caller-argument.patch
@@ -0,0 +1,145 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:28 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:57 +0900
+Subject: [PATCH v2 40/58] mm: vmalloc: use const void * for caller argument
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-41-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+'const void *' is a safer type for the caller argument. This patch
+updates all references to the caller function type.
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Reviewed-by: Kyungmin Park <kyungmin.park@samsung.com>
+Reviewed-by: Minchan Kim <minchan@kernel.org>
+(cherry picked from commit 5e6cafc83e30f0f70c79a2b7aef237dc57e29f02)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+---
+ include/linux/vmalloc.h | 8 ++++----
+ mm/vmalloc.c | 18 +++++++++---------
+ 2 files changed, 13 insertions(+), 13 deletions(-)
+
+diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
+index dcdfc2b..2e28f4d 100644
+--- a/include/linux/vmalloc.h
++++ b/include/linux/vmalloc.h
+@@ -32,7 +32,7 @@ struct vm_struct {
+ struct page **pages;
+ unsigned int nr_pages;
+ phys_addr_t phys_addr;
+- void *caller;
++ const void *caller;
+ };
+
+ /*
+@@ -62,7 +62,7 @@ extern void *vmalloc_32_user(unsigned long size);
+ extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
+ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
+ unsigned long start, unsigned long end, gfp_t gfp_mask,
+- pgprot_t prot, int node, void *caller);
++ pgprot_t prot, int node, const void *caller);
+ extern void vfree(const void *addr);
+
+ extern void *vmap(struct page **pages, unsigned int count,
+@@ -85,13 +85,13 @@ static inline size_t get_vm_area_size(const struct vm_struct *area)
+
+ extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
+ extern struct vm_struct *get_vm_area_caller(unsigned long size,
+- unsigned long flags, void *caller);
++ unsigned long flags, const void *caller);
+ extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
+ unsigned long start, unsigned long end);
+ extern struct vm_struct *__get_vm_area_caller(unsigned long size,
+ unsigned long flags,
+ unsigned long start, unsigned long end,
+- void *caller);
++ const void *caller);
+ extern struct vm_struct *remove_vm_area(const void *addr);
+
+ extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
+diff --git a/mm/vmalloc.c b/mm/vmalloc.c
+index 1196c77..389e77c 100644
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -1280,7 +1280,7 @@ DEFINE_RWLOCK(vmlist_lock);
+ struct vm_struct *vmlist;
+
+ static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
+- unsigned long flags, void *caller)
++ unsigned long flags, const void *caller)
+ {
+ vm->flags = flags;
+ vm->addr = (void *)va->va_start;
+@@ -1306,7 +1306,7 @@ static void insert_vmalloc_vmlist(struct vm_struct *vm)
+ }
+
+ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
+- unsigned long flags, void *caller)
++ unsigned long flags, const void *caller)
+ {
+ setup_vmalloc_vm(vm, va, flags, caller);
+ insert_vmalloc_vmlist(vm);
+@@ -1314,7 +1314,7 @@ static void insert_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
+
+ static struct vm_struct *__get_vm_area_node(unsigned long size,
+ unsigned long align, unsigned long flags, unsigned long start,
+- unsigned long end, int node, gfp_t gfp_mask, void *caller)
++ unsigned long end, int node, gfp_t gfp_mask, const void *caller)
+ {
+ struct vmap_area *va;
+ struct vm_struct *area;
+@@ -1375,7 +1375,7 @@ EXPORT_SYMBOL_GPL(__get_vm_area);
+
+ struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
+ unsigned long start, unsigned long end,
+- void *caller)
++ const void *caller)
+ {
+ return __get_vm_area_node(size, 1, flags, start, end, -1, GFP_KERNEL,
+ caller);
+@@ -1397,7 +1397,7 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
+ }
+
+ struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
+- void *caller)
++ const void *caller)
+ {
+ return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
+ -1, GFP_KERNEL, caller);
+@@ -1568,9 +1568,9 @@ EXPORT_SYMBOL(vmap);
+
+ static void *__vmalloc_node(unsigned long size, unsigned long align,
+ gfp_t gfp_mask, pgprot_t prot,
+- int node, void *caller);
++ int node, const void *caller);
+ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
+- pgprot_t prot, int node, void *caller)
++ pgprot_t prot, int node, const void *caller)
+ {
+ const int order = 0;
+ struct page **pages;
+@@ -1643,7 +1643,7 @@ fail:
+ */
+ void *__vmalloc_node_range(unsigned long size, unsigned long align,
+ unsigned long start, unsigned long end, gfp_t gfp_mask,
+- pgprot_t prot, int node, void *caller)
++ pgprot_t prot, int node, const void *caller)
+ {
+ struct vm_struct *area;
+ void *addr;
+@@ -1699,7 +1699,7 @@ fail:
+ */
+ static void *__vmalloc_node(unsigned long size, unsigned long align,
+ gfp_t gfp_mask, pgprot_t prot,
+- int node, void *caller)
++ int node, const void *caller)
+ {
+ return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
+ gfp_mask, prot, node, caller);
+--
+1.7.5.4
+
diff --git a/patches.dma-mapping/x86-dma-mapping-fix-broken-allocation-when-dma_mask-has-been-provided.patch b/patches.dma-mapping/x86-dma-mapping-fix-broken-allocation-when-dma_mask-has-been-provided.patch
new file mode 100644
index 0000000000000..d9698ee91561f
--- /dev/null
+++ b/patches.dma-mapping/x86-dma-mapping-fix-broken-allocation-when-dma_mask-has-been-provided.patch
@@ -0,0 +1,50 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:53:15 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:51 +0900
+Subject: [PATCH v2 34/58] x86: dma-mapping: fix broken allocation when dma_mask has been provided
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-35-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+Commit 0a2b9a6ea93 ("X86: integrate CMA with DMA-mapping subsystem")
+broke memory allocation with a dma_mask. This patch fixes a possible kernel
+oops caused by not resetting the page variable when jumping to the 'again' label.
+
+Reported-by: Konrad Rzeszutek Wilk <konrad@darnok.org>
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Michal Nazarewicz <mina86@mina86.com>
+(cherry picked from commit c080e26edc3a2a3cdfa4c430c663ee1c3bbd8fae)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ arch/x86/kernel/pci-dma.c | 3 ++-
+ 1 files changed, 2 insertions(+), 1 deletions(-)
+
+diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
+index 62c9457..c0f420f 100644
+--- a/arch/x86/kernel/pci-dma.c
++++ b/arch/x86/kernel/pci-dma.c
+@@ -100,7 +100,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+ struct dma_attrs *attrs)
+ {
+ unsigned long dma_mask;
+- struct page *page = NULL;
++ struct page *page;
+ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ dma_addr_t addr;
+
+@@ -108,6 +108,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
+
+ flag |= __GFP_ZERO;
+ again:
++ page = NULL;
+ if (!(flag & GFP_ATOMIC))
+ page = dma_alloc_from_contiguous(dev, count, get_order(size));
+ if (!page)
+--
+1.7.5.4
+
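
An aside, not part of the patch series: the one-line reset above matters
because the 'again' loop can be re-entered with GFP_ATOMIC set, in which case
both allocation branches are skipped and the pointer freed on the previous
pass would be reused. A self-contained userspace model of that retry pattern
is sketched below; all helpers are simplified stand-ins for the kernel code.

#include <stdio.h>
#include <stdbool.h>

#define F_ATOMIC 0x1u			/* stands in for GFP_ATOMIC */

static int backing_page;		/* stands in for a struct page */

static int *cma_alloc(void)   { return NULL; }		/* CMA has nothing */
static int *buddy_alloc(void) { return &backing_page; }
static void release(int *p)   { (void)p; }		/* "freed": reuse would be a bug */
static bool fits_dma_mask(int pass) { return pass > 0; }	/* first pass fails */

static int *alloc_coherent(unsigned int flag)
{
	int *page;
	int pass = 0;

again:
	page = NULL;			/* the fix: reset on every pass */
	if (!(flag & F_ATOMIC))
		page = cma_alloc();
	if (!page)
		page = buddy_alloc();
	if (!page)
		return NULL;

	if (!fits_dma_mask(pass)) {
		release(page);
		pass++;
		goto again;		/* without the reset, an atomic retry would
					 * skip both allocators and keep the stale
					 * pointer */
	}
	return page;
}

int main(void)
{
	printf("got %p\n", (void *)alloc_coherent(F_ATOMIC));
	return 0;
}
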
diff --git a/patches.dma-mapping/x86-integrate-cma-with-dma-mapping-subsystem.patch b/patches.dma-mapping/x86-integrate-cma-with-dma-mapping-subsystem.patch
new file mode 100644
index 0000000000000..a63ce6abfa493
--- /dev/null
+++ b/patches.dma-mapping/x86-integrate-cma-with-dma-mapping-subsystem.patch
@@ -0,0 +1,162 @@
+From dhobsong@igel.co.jp Mon Oct 29 00:52:21 2012
+From: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Date: Mon, 29 Oct 2012 16:50:31 +0900
+Subject: [PATCH v2 14/58] X86: integrate CMA with DMA-mapping subsystem
+To: greg@kroah.com, laurent.pinchart@ideasonboard.com, horms@verge.net.au
+Cc: ltsi-dev@lists.linuxfoundation.org, dhobsong@igel.co.jp
+Message-ID: <1351497075-32717-15-git-send-email-dhobsong@igel.co.jp>
+
+
+From: Marek Szyprowski <m.szyprowski@samsung.com>
+
+This patch adds CMA support to the dma-mapping subsystem for the x86
+architecture, which uses the common pci-dma/pci-nommu implementation. This
+makes it possible to test CMA on KVM/QEMU and on many common x86 boxes.
+
+Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
+CC: Michal Nazarewicz <mina86@mina86.com>
+Acked-by: Arnd Bergmann <arnd@arndb.de>
+(cherry picked from commit 0a2b9a6ea93650b8a00f9fd5ee8fdd25671e2df6)
+
+Signed-off-by: Damian Hobson-Garcia <dhobsong@igel.co.jp>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+---
+ arch/x86/Kconfig | 1 +
+ arch/x86/include/asm/dma-contiguous.h | 13 +++++++++++++
+ arch/x86/include/asm/dma-mapping.h | 5 +++++
+ arch/x86/kernel/pci-dma.c | 18 ++++++++++++++++--
+ arch/x86/kernel/pci-nommu.c | 8 +-------
+ arch/x86/kernel/setup.c | 2 ++
+ 6 files changed, 38 insertions(+), 9 deletions(-)
+ create mode 100644 arch/x86/include/asm/dma-contiguous.h
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -32,6 +32,7 @@ config X86
+ select ARCH_WANT_OPTIONAL_GPIOLIB
+ select ARCH_WANT_FRAME_POINTERS
+ select HAVE_DMA_ATTRS
++ select HAVE_DMA_CONTIGUOUS if !SWIOTLB
+ select HAVE_KRETPROBES
+ select HAVE_OPTPROBES
+ select HAVE_FTRACE_MCOUNT_RECORD
+--- /dev/null
++++ b/arch/x86/include/asm/dma-contiguous.h
+@@ -0,0 +1,13 @@
++#ifndef ASMX86_DMA_CONTIGUOUS_H
++#define ASMX86_DMA_CONTIGUOUS_H
++
++#ifdef __KERNEL__
++
++#include <linux/types.h>
++#include <asm-generic/dma-contiguous.h>
++
++static inline void
++dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
++
++#endif
++#endif
+--- a/arch/x86/include/asm/dma-mapping.h
++++ b/arch/x86/include/asm/dma-mapping.h
+@@ -13,6 +13,7 @@
+ #include <asm/io.h>
+ #include <asm/swiotlb.h>
+ #include <asm-generic/dma-coherent.h>
++#include <linux/dma-contiguous.h>
+
+ #ifdef CONFIG_ISA
+ # define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
+@@ -62,6 +63,10 @@ extern void *dma_generic_alloc_coherent(
+ dma_addr_t *dma_addr, gfp_t flag,
+ struct dma_attrs *attrs);
+
++extern void dma_generic_free_coherent(struct device *dev, size_t size,
++ void *vaddr, dma_addr_t dma_addr,
++ struct dma_attrs *attrs);
++
+ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+ {
+ if (!dev->dma_mask)
+--- a/arch/x86/kernel/pci-dma.c
++++ b/arch/x86/kernel/pci-dma.c
+@@ -100,14 +100,18 @@ void *dma_generic_alloc_coherent(struct
+ struct dma_attrs *attrs)
+ {
+ unsigned long dma_mask;
+- struct page *page;
++ struct page *page = NULL;
++ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ dma_addr_t addr;
+
+ dma_mask = dma_alloc_coherent_mask(dev, flag);
+
+ flag |= __GFP_ZERO;
+ again:
+- page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
++ if (!(flag & GFP_ATOMIC))
++ page = dma_alloc_from_contiguous(dev, count, get_order(size));
++ if (!page)
++ page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
+ if (!page)
+ return NULL;
+
+@@ -127,6 +131,16 @@ again:
+ return page_address(page);
+ }
+
++void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
++ dma_addr_t dma_addr, struct dma_attrs *attrs)
++{
++ unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
++ struct page *page = virt_to_page(vaddr);
++
++ if (!dma_release_from_contiguous(dev, page, count))
++ free_pages((unsigned long)vaddr, get_order(size));
++}
++
+ /*
+ * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
+ * parameter documentation.
+--- a/arch/x86/kernel/pci-nommu.c
++++ b/arch/x86/kernel/pci-nommu.c
+@@ -74,12 +74,6 @@ static int nommu_map_sg(struct device *h
+ return nents;
+ }
+
+-static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
+- dma_addr_t dma_addr, struct dma_attrs *attrs)
+-{
+- free_pages((unsigned long)vaddr, get_order(size));
+-}
+-
+ static void nommu_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+@@ -97,7 +91,7 @@ static void nommu_sync_sg_for_device(str
+
+ struct dma_map_ops nommu_dma_ops = {
+ .alloc = dma_generic_alloc_coherent,
+- .free = nommu_free_coherent,
++ .free = dma_generic_free_coherent,
+ .map_sg = nommu_map_sg,
+ .map_page = nommu_map_page,
+ .sync_single_for_device = nommu_sync_single_for_device,
+--- a/arch/x86/kernel/setup.c
++++ b/arch/x86/kernel/setup.c
+@@ -50,6 +50,7 @@
+ #include <asm/pci-direct.h>
+ #include <linux/init_ohci1394_dma.h>
+ #include <linux/kvm_para.h>
++#include <linux/dma-contiguous.h>
+
+ #include <linux/errno.h>
+ #include <linux/kernel.h>
+@@ -948,6 +949,7 @@ void __init setup_arch(char **cmdline_p)
+ }
+ #endif
+ memblock.current_limit = get_max_mapped();
++ dma_contiguous_reserve(0);
+
+ /*
+ * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
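
A usage note, not part of the patch series: with this patch and the CMA core
patch applied, the default contiguous area reserved by dma_contiguous_reserve(0)
is sized by the Kconfig options that the CMA core patch adds under
drivers/base, or overridden on the kernel command line. The fragment below is
only an example and assumes those option names; on x86 it also requires
SWIOTLB to be disabled so that HAVE_DMA_CONTIGUOUS gets selected.

# Example .config fragment
CONFIG_CMA=y
CONFIG_CMA_SIZE_MBYTES=16

# Or override the reservation size at boot time, e.g.:
#   cma=64M
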
diff --git a/patches.kzm9d/arm-mach-shmobile-fix-build-when-smp-is-enabled-and-emev2-is-not-enabled.patch b/patches.kzm9d/arm-mach-shmobile-fix-build-when-smp-is-enabled-and-emev2-is-not-enabled.patch
index 5c4105ed426e1..24fb131200b93 100644
--- a/patches.kzm9d/arm-mach-shmobile-fix-build-when-smp-is-enabled-and-emev2-is-not-enabled.patch
+++ b/patches.kzm9d/arm-mach-shmobile-fix-build-when-smp-is-enabled-and-emev2-is-not-enabled.patch
@@ -38,8 +38,6 @@ Signed-off-by: Tetsuyuki Kobayashi <koba@kmckk.co.jp>
arch/arm/mach-shmobile/platsmp.c | 5 +++++
1 file changed, 5 insertions(+)
-diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
-index 5f1a59b..fde0d23 100644
--- a/arch/arm/mach-shmobile/platsmp.c
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -30,7 +30,12 @@
@@ -55,6 +53,3 @@ index 5f1a59b..fde0d23 100644
static unsigned int __init shmobile_smp_get_core_count(void)
{
---
-1.7.9.5
-
diff --git a/patches.marzen/0016-devicetree-add-helper-inline-for-retrieving-a-node-s.patch b/patches.marzen/0016-devicetree-add-helper-inline-for-retrieving-a-node-s.patch
index 24847e4eed646..4c4bbf3ca3497 100644
--- a/patches.marzen/0016-devicetree-add-helper-inline-for-retrieving-a-node-s.patch
+++ b/patches.marzen/0016-devicetree-add-helper-inline-for-retrieving-a-node-s.patch
@@ -17,23 +17,21 @@ Signed-off-by: Rob Herring <rob.herring@calxeda.com>
Signed-off-by: Simon Horman <horms@verge.net.au>
---
- arch/microblaze/pci/pci-common.c | 6 ++----
- arch/powerpc/kernel/pci-common.c | 6 ++----
- arch/powerpc/kernel/vio.c | 5 ++---
- arch/powerpc/platforms/cell/iommu.c | 3 +--
- arch/powerpc/platforms/pseries/iommu.c | 2 +-
- arch/sparc/kernel/of_device_64.c | 2 +-
- drivers/of/base.c | 2 +-
- drivers/of/irq.c | 2 +-
- include/linux/of.h | 10 ++++++++++
- kernel/irq/irqdomain.c | 8 ++++----
+ arch/microblaze/pci/pci-common.c | 6 ++----
+ arch/powerpc/kernel/pci-common.c | 6 ++----
+ arch/powerpc/kernel/vio.c | 5 ++---
+ arch/powerpc/platforms/cell/iommu.c | 3 +--
+ arch/powerpc/platforms/pseries/iommu.c | 2 +-
+ arch/sparc/kernel/of_device_64.c | 2 +-
+ drivers/of/base.c | 2 +-
+ drivers/of/irq.c | 2 +-
+ include/linux/of.h | 10 ++++++++++
+ kernel/irq/irqdomain.c | 8 ++++----
10 files changed, 25 insertions(+), 21 deletions(-)
-diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
-index d10403d..162b4e5 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
-@@ -249,8 +249,7 @@ int pci_read_irq_line(struct pci_dev *pci_dev)
+@@ -249,8 +249,7 @@ int pci_read_irq_line(struct pci_dev *pc
} else {
pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
oirq.size, oirq.specifier[0], oirq.specifier[1],
@@ -43,7 +41,7 @@ index d10403d..162b4e5 100644
virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
oirq.size);
-@@ -1492,8 +1491,7 @@ static void __devinit pcibios_scan_phb(struct pci_controller *hose)
+@@ -1492,8 +1491,7 @@ static void __devinit pcibios_scan_phb(s
struct pci_bus *bus;
struct device_node *node = hose->dn;
@@ -53,11 +51,9 @@ index d10403d..162b4e5 100644
pcibios_setup_phb_resources(hose, &resources);
-diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
-index 8e78e93..886c254 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
-@@ -248,8 +248,7 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
+@@ -248,8 +248,7 @@ static int pci_read_irq_line(struct pci_
} else {
pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
oirq.size, oirq.specifier[0], oirq.specifier[1],
@@ -67,7 +63,7 @@ index 8e78e93..886c254 100644
virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
oirq.size);
-@@ -1628,8 +1627,7 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose)
+@@ -1628,8 +1627,7 @@ void __devinit pcibios_scan_phb(struct p
struct device_node *node = hose->dn;
int mode;
@@ -77,11 +73,9 @@ index 8e78e93..886c254 100644
/* Get some IO space for the new PHB */
pcibios_setup_phb_io_space(hose);
-diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
-index a3a9990..ea6081a 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
-@@ -1193,8 +1193,7 @@ static void __devinit vio_dev_release(struct device *dev)
+@@ -1194,8 +1194,7 @@ static void __devinit vio_dev_release(st
struct iommu_table *tbl = get_iommu_table_base(dev);
if (tbl)
@@ -91,7 +85,7 @@ index a3a9990..ea6081a 100644
of_node_put(dev->of_node);
kfree(to_vio_dev(dev));
}
-@@ -1330,7 +1329,7 @@ static ssize_t devspec_show(struct device *dev,
+@@ -1331,7 +1330,7 @@ static ssize_t devspec_show(struct devic
{
struct device_node *of_node = dev->of_node;
@@ -100,11 +94,9 @@ index a3a9990..ea6081a 100644
}
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
-diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
-index b9f509a..b673200 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
-@@ -552,8 +552,7 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev)
+@@ -552,8 +552,7 @@ static struct iommu_table *cell_get_iomm
iommu = cell_iommu_for_node(dev_to_node(dev));
if (iommu == NULL || list_empty(&iommu->windows)) {
printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
@@ -114,11 +106,9 @@ index b9f509a..b673200 100644
return NULL;
}
window = list_entry(iommu->windows.next, struct iommu_window, list);
-diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
-index 2d311c0..6b58a39 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
-@@ -1051,7 +1051,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
+@@ -1051,7 +1051,7 @@ static void pci_dma_dev_setup_pSeriesLP(
if (!pdn || !PCI_DN(pdn)) {
printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
"no DMA window found for pci dev=%s dn=%s\n",
@@ -127,11 +117,9 @@ index 2d311c0..6b58a39 100644
return;
}
pr_debug(" parent is %s\n", pdn->full_name);
-diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c
-index 7a3be6f..7bbdc26 100644
--- a/arch/sparc/kernel/of_device_64.c
+++ b/arch/sparc/kernel/of_device_64.c
-@@ -580,7 +580,7 @@ static unsigned int __init build_one_device_irq(struct platform_device *op,
+@@ -580,7 +580,7 @@ static unsigned int __init build_one_dev
printk("%s: Apply [%s:%x] imap --> [%s:%x]\n",
op->dev.of_node->full_name,
pp->full_name, this_orig_irq,
@@ -140,11 +128,9 @@ index 7a3be6f..7bbdc26 100644
if (!iret)
break;
-diff --git a/drivers/of/base.c b/drivers/of/base.c
-index 5806449..6271c2e 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
-@@ -1173,7 +1173,7 @@ static void of_alias_add(struct alias_prop *ap, struct device_node *np,
+@@ -1173,7 +1173,7 @@ static void of_alias_add(struct alias_pr
ap->stem[stem_len] = 0;
list_add_tail(&ap->link, &aliases_lookup);
pr_debug("adding DT alias:%s: stem=%s id=%i node=%s\n",
@@ -153,11 +139,9 @@ index 5806449..6271c2e 100644
}
/**
-diff --git a/drivers/of/irq.c b/drivers/of/irq.c
-index 9cf0060..ff8ab7b 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
-@@ -255,7 +255,7 @@ int of_irq_map_raw(struct device_node *parent, const __be32 *intspec,
+@@ -255,7 +255,7 @@ int of_irq_map_raw(struct device_node *p
skiplevel:
/* Iterate again with new parent */
@@ -166,11 +150,9 @@ index 9cf0060..ff8ab7b 100644
of_node_put(ipar);
ipar = newpar;
newpar = NULL;
-diff --git a/include/linux/of.h b/include/linux/of.h
-index fa7fb1d..284d117 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
-@@ -163,6 +163,11 @@ static inline int of_node_to_nid(struct device_node *np) { return -1; }
+@@ -163,6 +163,11 @@ static inline int of_node_to_nid(struct
#define of_node_to_nid of_node_to_nid
#endif
@@ -182,7 +164,7 @@ index fa7fb1d..284d117 100644
extern struct device_node *of_find_node_by_name(struct device_node *from,
const char *name);
#define for_each_node_by_name(dn, name) \
-@@ -261,6 +266,11 @@ extern void of_detach_node(struct device_node *);
+@@ -261,6 +266,11 @@ extern void of_detach_node(struct device
#define of_match_ptr(_ptr) (_ptr)
#else /* CONFIG_OF */
@@ -194,11 +176,9 @@ index fa7fb1d..284d117 100644
static inline bool of_have_populated_dt(void)
{
return false;
-diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
-index 41c1564..38c5eb8 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
-@@ -448,7 +448,7 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
+@@ -448,7 +448,7 @@ unsigned int irq_create_mapping(struct i
}
pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
@@ -207,7 +187,7 @@ index 41c1564..38c5eb8 100644
return virq;
}
-@@ -477,7 +477,7 @@ unsigned int irq_create_of_mapping(struct device_node *controller,
+@@ -477,7 +477,7 @@ unsigned int irq_create_of_mapping(struc
return intspec[0];
#endif
pr_warning("no irq domain found for %s !\n",
@@ -216,7 +196,7 @@ index 41c1564..38c5eb8 100644
return 0;
}
-@@ -725,8 +725,8 @@ static int virq_debug_show(struct seq_file *m, void *private)
+@@ -725,8 +725,8 @@ static int virq_debug_show(struct seq_fi
data = irq_desc_get_chip_data(desc);
seq_printf(m, data ? "0x%p " : " %p ", data);
@@ -227,6 +207,3 @@ index 41c1564..38c5eb8 100644
else
p = none;
seq_printf(m, "%s\n", p);
---
-1.8.0.197.g5a90748
-
diff --git a/patches.marzen/0037-ARM-provide-a-late_initcall-hook-for-platform-initia.patch b/patches.marzen/0037-ARM-provide-a-late_initcall-hook-for-platform-initia.patch
index 5ced9d5e5efe3..97335ca68fd0a 100644
--- a/patches.marzen/0037-ARM-provide-a-late_initcall-hook-for-platform-initia.patch
+++ b/patches.marzen/0037-ARM-provide-a-late_initcall-hook-for-platform-initia.patch
@@ -15,12 +15,10 @@ Acked-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Simon Horman <horms@verge.net.au>
---
- arch/arm/include/asm/mach/arch.h | 1 +
- arch/arm/kernel/setup.c | 8 ++++++++
+ arch/arm/include/asm/mach/arch.h | 1 +
+ arch/arm/kernel/setup.c | 8 ++++++++
2 files changed, 9 insertions(+)
-diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
-index d7692ca..0b1c94b 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -43,6 +43,7 @@ struct machine_desc {
@@ -31,11 +29,9 @@ index d7692ca..0b1c94b 100644
#ifdef CONFIG_MULTI_IRQ_HANDLER
void (*handle_irq)(struct pt_regs *);
#endif
-diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
-index ebfac78..549f036 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
-@@ -800,6 +800,14 @@ static int __init customize_machine(void)
+@@ -801,6 +801,14 @@ static int __init customize_machine(void
}
arch_initcall(customize_machine);
@@ -50,6 +46,3 @@ index ebfac78..549f036 100644
#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
---
-1.8.0.197.g5a90748
-
diff --git a/series b/series
index 871f5f0557c0d..c5196b915c2d1 100644
--- a/series
+++ b/series
@@ -112,7 +112,7 @@ patches.pramfs/17-pramfs-makefile-and-kconfig.patch
#############################################################################
-# pramfs
+# axfs
#
# Currently broken, so don't enable, but leave here for the placeholder.
#
@@ -134,6 +134,69 @@ patches.axfs/axfs-mount-as-rootfs.patch
#############################################################################
+# dma-mapping and CMA and IOMMU memory allocation feature
+#
+patches.dma-mapping/mm-page_alloc-remove-trailing-whitespace.patch
+patches.dma-mapping/mm-compaction-introduce-isolate_migratepages_range.patch
+patches.dma-mapping/mm-compaction-introduce-map_pages.patch
+patches.dma-mapping/mm-compaction-introduce-isolate_freepages_range.patch
+patches.dma-mapping/mm-compaction-export-some-of-the-functions.patch
+patches.dma-mapping/mm-page_alloc-introduce-alloc_contig_range.patch
+patches.dma-mapping/mm-page_alloc-change-fallbacks-array-handling.patch
+patches.dma-mapping/mm-mmzone-migrate_cma-migration-type-added.patch
+patches.dma-mapping/mm-page_isolation-migrate_cma-isolation-functions-added.patch
+patches.dma-mapping/mm-serialize-access-to-min_free_kbytes.patch
+patches.dma-mapping/mm-extract-reclaim-code-from-__alloc_pages_direct_reclaim.patch
+patches.dma-mapping/mm-trigger-page-reclaim-in-alloc_contig_range-to-stabilise-watermarks.patch
+patches.dma-mapping/drivers-add-contiguous-memory-allocator.patch
+patches.dma-mapping/x86-integrate-cma-with-dma-mapping-subsystem.patch
+patches.dma-mapping/arm-integrate-cma-with-dma-mapping-subsystem.patch
+patches.dma-mapping/cma-fix-migration-mode.patch
+patches.dma-mapping/arm-dma-mapping-use-pmd-size-for-section-unmap.patch
+patches.dma-mapping/common-add-dma_mmap_from_coherent-function.patch
+patches.dma-mapping/arm-dma-mapping-use-dma_mmap_from_coherent.patch
+patches.dma-mapping/arm-dma-mapping-use-pr_-instread-of-printk.patch
+patches.dma-mapping/arm-dma-mapping-introduce-dma_error_code-constant.patch
+patches.dma-mapping/arm-dma-mapping-remove-offset-parameter-to-prepare-for-generic-dma_ops.patch
+patches.dma-mapping/arm-dma-mapping-use-asm-generic-dma-mapping-common.h.patch
+patches.dma-mapping/arm-dma-mapping-implement-dma-sg-methods-on-top-of-any-generic-dma-ops.patch
+patches.dma-mapping/arm-dma-mapping-move-all-dma-bounce-code-to-separate-dma-ops-structure.patch
+patches.dma-mapping/arm-dma-mapping-remove-redundant-code-and-do-the-cleanup.patch
+patches.dma-mapping/arm-dma-mapping-use-alloc-mmap-free-from-dma_ops.patch
+patches.dma-mapping/arm-dma-mapping-add-support-for-iommu-mapper.patch
+patches.dma-mapping/arm-dma-mapping-remove-unconditional-dependency-on-cma.patch
+patches.dma-mapping/iommu-core-pass-a-user-provided-token-to-fault-handlers.patch
+patches.dma-mapping/arm-dma-mapping-add-missing-static-storage-class-specifier.patch
+patches.dma-mapping/arm-mm-fix-type-of-the-arm_dma_limit-global-variable.patch
+patches.dma-mapping/arm-dma-mapping-fix-debug-messages-in-dmabounce-code.patch
+patches.dma-mapping/x86-dma-mapping-fix-broken-allocation-when-dma_mask-has-been-provided.patch
+patches.dma-mapping/mm-cma-don-t-replace-lowmem-pages-with-highmem.patch
+patches.dma-mapping/mm-cma-fix-condition-check-when-setting-global-cma-area.patch
+patches.dma-mapping/arm-dma-mapping-modify-condition-check-while-freeing-pages.patch
+patches.dma-mapping/arm-mm-fix-mmu-mapping-of-cma-regions.patch
+patches.dma-mapping/arm-relax-conditions-required-for-enabling-contiguous-memory-allocator.patch
+patches.dma-mapping/mm-vmalloc-use-const-void-for-caller-argument.patch
+patches.dma-mapping/arm-dma-mapping-remove-custom-consistent-dma-region.patch
+patches.dma-mapping/arm-dma-mapping-add-more-sanity-checks-in-arm_dma_mmap.patch
+patches.dma-mapping/arm-dma-mapping-fix-error-path-for-memory-allocation-failure.patch
+patches.dma-mapping/common-dma-mapping-add-support-for-generic-dma_mmap_-calls.patch
+patches.dma-mapping/mm-clean-up-__count_immobile_pages.patch
+patches.dma-mapping/mm-factor-out-memory-isolate-functions.patch
+patches.dma-mapping/driver-core-fix-some-kernel-doc-warnings-in-dma-.c.patch
+patches.dma-mapping/arm-dma-mapping-fix-buffer-chunk-allocation-order.patch
+patches.dma-mapping/arm-fix-warning-caused-by-wrongly-typed-arm_dma_limit.patch
+patches.dma-mapping/arm-dma-mapping-fix-atomic-allocation-alignment.patch
+patches.dma-mapping/arm-dma-mapping-fix-incorrect-freeing-of-atomic-allocations.patch
+patches.dma-mapping/mm-cma-fix-alignment-requirements-for-contiguous-regions.patch
+patches.dma-mapping/arm-dma-mapping-add-function-for-setting-coherent-pool-size-from-platform-code.patch
+patches.dma-mapping/arm-dma-mapping-print-warning-when-atomic-coherent-allocation-fails.patch
+patches.dma-mapping/arm-dma-mapping-refactor-out-to-introduce-__in_atomic_pool.patch
+patches.dma-mapping/arm-mm-fix-dma-pool-affiliation-check.patch
+patches.dma-mapping/arm-dma-mapping-atomic_pool-with-struct-page-pages.patch
+patches.dma-mapping/arm-dma-mapping-fix-potential-memory-leak-in-atomic_pool_init.patch
+
+
+#############################################################################
# Marzen board support
#
patches.marzen/0001-sh-clkfwk-Support-variable-size-accesses-for-MSTP-cl.patch
@@ -668,3 +731,5 @@ patches.kzm9d/arm-mach-shmobile-kzm9d-add-defconfig.patch
patches.kzm9d/mach-shmobile-emma-mobile-ev2-dt-support-v3.patch
patches.kzm9d/mach-shmobile-use-dt_machine-for-kzm9d-v3.patch
patches.kzm9d/arm-mach-shmobile-fix-build-when-smp-is-enabled-and-emev2-is-not-enabled.patch
+
+