# HG changeset 93+99+100+101+105 patch
# User kfraser@localhost.localdomain
# Date 1183985110 -3600
# Node ID 08cf42135056cbc07a6d790d4851e0e4b160f847
# Parent  f833757672a70ee43afd0bfbfaa22cec3b132445
Subject: x86: dma_map_sg() must handle multi-page segments.
Signed-off-by: Keir Fraser <keir@xensource.com>

Subject: swiotlb: Handle sync invocations on subregions of a mapped region.
Signed-off-by: Keir Fraser <keir@xensource.com>

Subject: swiotlb: Keep offset in a page strictly smaller than PAGE_SIZE.
Signed-off-by: Keir Fraser <keir@xensource.com>

Subject: swiotlb: Allow sync on arbitrary offsets into dma-mapped region.
Signed-off-by: Keir Fraser <keir@xensource.com>

Subject: swiotlb: dma_addr_to_phys_addr() should be static.
Signed-off-by: Keir Fraser <keir@xensource.com>

Acked-by: jbeulich@novell.com

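The common thread of these five changes: range_straddles_page_boundary() now
takes a (pseudo-)physical address instead of a kernel-virtual pointer, so the
same test can be applied to scatterlist pages, and the swiotlb keeps enough
per-slot bookkeeping to sync arbitrary subregions of a mapping. As a quick
illustration of the boundary test itself, a minimal user-space sketch with a
worked example; the 4 KiB page constants and the stubbed contiguity lookup are
assumptions for illustration, not the kernel code:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

/* Stub standing in for test_bit(p >> PAGE_SHIFT, contiguous_bitmap):
 * pretend no machine frames are known to be contiguous. */
static int frames_contiguous(unsigned long pfn) { (void)pfn; return 0; }

static int range_straddles_page_boundary(unsigned long p, unsigned long size)
{
	return ((((p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
		!frames_contiguous(p >> PAGE_SHIFT));
}

int main(void)
{
	/* 0x400 bytes starting 0xe00 into a page spill into the next page. */
	printf("%d\n", range_straddles_page_boundary(0x10e00, 0x400)); /* 1 */
	/* 0x100 bytes at the same offset stay within one page. */
	printf("%d\n", range_straddles_page_boundary(0x10e00, 0x100)); /* 0 */
	return 0;
}
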
Index: head-2007-08-07/arch/i386/kernel/pci-dma-xen.c
===================================================================
--- head-2007-08-07.orig/arch/i386/kernel/pci-dma-xen.c	2007-08-07 09:47:30.000000000 +0200
+++ head-2007-08-07/arch/i386/kernel/pci-dma-xen.c	2007-08-07 09:48:10.000000000 +0200
@@ -97,6 +97,9 @@ dma_map_sg(struct device *hwdev, struct 
 			BUG_ON(!sg[i].page);
 			IOMMU_BUG_ON(address_needs_mapping(
 				hwdev, sg[i].dma_address));
+			IOMMU_BUG_ON(range_straddles_page_boundary(
+				page_to_pseudophys(sg[i].page) + sg[i].offset,
+				sg[i].length));
 		}
 		rc = nents;
 	}
@@ -338,7 +341,7 @@ dma_map_single(struct device *dev, void 
 	} else {
 		dma = gnttab_dma_map_page(virt_to_page(ptr)) +
 		      offset_in_page(ptr);
-		IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
+		IOMMU_BUG_ON(range_straddles_page_boundary(__pa(ptr), size));
 		IOMMU_BUG_ON(address_needs_mapping(dev, dma));
 	}
 
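For reference, the kind of segment the new dma_map_sg() check is aimed at: a
segment whose length extends past its first page is only safe to hand straight
to the device when the frames behind it are machine-contiguous; otherwise it
must be bounced. A small arithmetic sketch with a hypothetical segment, where
the 4 KiB pages and 2 KiB swiotlb slots are assumptions for illustration:

#include <stdio.h>

#define PAGE_SHIFT	12
#define IO_TLB_SHIFT	11	/* assumed swiotlb slot size: 2 KiB */

int main(void)
{
	/* Hypothetical sg entry: 8 KiB starting 2 KiB into its first page. */
	unsigned long offset = 2048, length = 8192;

	/* The segment touches pages 0..last_page of its buffer... */
	unsigned long last_page = (offset + length - 1) >> PAGE_SHIFT;
	/* ...and, if bounced, occupies ALIGN(size, slot) / slot slots,
	 * matching the nslots computation in unmap_single() below. */
	unsigned long nslots = (length + (1UL << IO_TLB_SHIFT) - 1)
				>> IO_TLB_SHIFT;

	printf("spans %lu pages, needs %lu bounce slots\n",
	       last_page + 1, nslots);	/* 3 pages, 4 slots */
	return 0;
}
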
Index: head-2007-08-07/arch/i386/kernel/swiotlb.c
===================================================================
--- head-2007-08-07.orig/arch/i386/kernel/swiotlb.c	2007-08-07 09:47:30.000000000 +0200
+++ head-2007-08-07/arch/i386/kernel/swiotlb.c	2007-08-07 09:48:50.000000000 +0200
@@ -304,6 +304,7 @@ map_single(struct device *hwdev, struct 
 	unsigned long flags;
 	char *dma_addr;
 	unsigned int nslots, stride, index, wrap;
+	struct phys_addr slot_buf;
 	int i;
 
 	/*
@@ -375,13 +376,29 @@ map_single(struct device *hwdev, struct 
 	 * This is needed when we sync the memory.  Then we sync the buffer if
 	 * needed.
 	 */
-	io_tlb_orig_addr[index] = buffer;
+	slot_buf = buffer;
+	for (i = 0; i < nslots; i++) {
+		slot_buf.page += slot_buf.offset >> PAGE_SHIFT;
+		slot_buf.offset &= PAGE_SIZE - 1;
+		io_tlb_orig_addr[index+i] = slot_buf;
+		slot_buf.offset += 1 << IO_TLB_SHIFT;
+	}
 	if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
 		__sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
 
 	return dma_addr;
 }
 
+static struct phys_addr dma_addr_to_phys_addr(char *dma_addr)
+{
+	int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
+	struct phys_addr buffer = io_tlb_orig_addr[index];
+	buffer.offset += (long)dma_addr & ((1 << IO_TLB_SHIFT) - 1);
+	buffer.page += buffer.offset >> PAGE_SHIFT;
+	buffer.offset &= PAGE_SIZE - 1;
+	return buffer;
+}
+
 /*
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
@@ -391,7 +408,7 @@ unmap_single(struct device *hwdev, char 
 	unsigned long flags;
 	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 	int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
-	struct phys_addr buffer = io_tlb_orig_addr[index];
+	struct phys_addr buffer = dma_addr_to_phys_addr(dma_addr);
 
 	/*
 	 * First, sync the memory before unmapping the entry
@@ -431,8 +448,7 @@ unmap_single(struct device *hwdev, char 
 static void
 sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
 {
-	int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
-	struct phys_addr buffer = io_tlb_orig_addr[index];
+	struct phys_addr buffer = dma_addr_to_phys_addr(dma_addr);
 	BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
 	__sync_single(buffer, dma_addr, size, dir);
 }
@@ -480,7 +496,7 @@ swiotlb_map_single(struct device *hwdev,
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
-	if (!range_straddles_page_boundary(ptr, size) &&
+	if (!range_straddles_page_boundary(__pa(ptr), size) &&
 	    !address_needs_mapping(hwdev, dev_addr))
 		return dev_addr;
 
@@ -577,7 +593,9 @@ swiotlb_map_sg(struct device *hwdev, str
 	for (i = 0; i < nelems; i++, sg++) {
 		dev_addr = gnttab_dma_map_page(sg->page) + sg->offset;
 
-		if (address_needs_mapping(hwdev, dev_addr)) {
+		if (range_straddles_page_boundary(page_to_pseudophys(sg->page)
+						  + sg->offset, sg->length)
+		    || address_needs_mapping(hwdev, dev_addr)) {
 			gnttab_dma_unmap_page(dev_addr);
 			buffer.page   = sg->page;
 			buffer.offset = sg->offset;
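
The point of the per-slot io_tlb_orig_addr[] bookkeeping above is that a sync
may arrive for any dma_addr inside a mapped region, not just its start, and
the recorded offset is kept strictly below PAGE_SIZE by advancing the page. A
simplified user-space sketch of the normalization and recovery arithmetic;
struct phys_addr is reduced to a page number plus offset, and the region is
assumed to start at slot 0 of a private array (the kernel version indexes the
global slot table via iotlb_virt_start):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define IO_TLB_SHIFT	11	/* assumed 2 KiB bounce slots */

struct phys_addr {
	unsigned long page;	/* page number stands in for struct page * */
	unsigned long offset;
};

/* Record one origin per slot, normalizing offset below PAGE_SIZE by
 * advancing the page, as the map_single() loop above does. */
static void fill_slots(struct phys_addr buffer, struct phys_addr *orig,
		       int nslots)
{
	int i;
	for (i = 0; i < nslots; i++) {
		buffer.page   += buffer.offset >> PAGE_SHIFT;
		buffer.offset &= PAGE_SIZE - 1;
		orig[i] = buffer;
		buffer.offset += 1UL << IO_TLB_SHIFT;
	}
}

/* Recover the original location for an arbitrary byte offset into the
 * bounce region, as dma_addr_to_phys_addr() does. */
static struct phys_addr slot_to_phys(const struct phys_addr *orig,
				     unsigned long byte_off)
{
	struct phys_addr buffer = orig[byte_off >> IO_TLB_SHIFT];
	buffer.offset += byte_off & ((1UL << IO_TLB_SHIFT) - 1);
	buffer.page   += buffer.offset >> PAGE_SHIFT;
	buffer.offset &= PAGE_SIZE - 1;
	return buffer;
}

int main(void)
{
	struct phys_addr orig[4];
	struct phys_addr buf = { 100, 3072 };	/* page 100, offset 3 KiB */
	struct phys_addr p;

	fill_slots(buf, orig, 4);	/* an 8 KiB mapping: four slots */
	p = slot_to_phys(orig, 5120);	/* sync 5 KiB into the region */
	printf("page %lu, offset %lu\n", p.page, p.offset); /* 102, 0 */
	return 0;
}
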
Index: head-2007-08-07/include/asm-i386/mach-xen/asm/dma-mapping.h
===================================================================
--- head-2007-08-07.orig/include/asm-i386/mach-xen/asm/dma-mapping.h	2007-08-07 09:47:09.000000000 +0200
+++ head-2007-08-07/include/asm-i386/mach-xen/asm/dma-mapping.h	2007-08-07 09:48:10.000000000 +0200
@@ -23,11 +23,11 @@ address_needs_mapping(struct device *hwd
 }
 
 static inline int
-range_straddles_page_boundary(void *p, size_t size)
+range_straddles_page_boundary(paddr_t p, size_t size)
 {
 	extern unsigned long *contiguous_bitmap;
-	return (((((unsigned long)p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
-		!test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap));
+	return ((((p & ~PAGE_MASK) + size) > PAGE_SIZE) &&
+		!test_bit(p >> PAGE_SHIFT, contiguous_bitmap));
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
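
With the paddr_t signature the one predicate serves both call shapes touched
above; for side-by-side comparison, these lines mirror the patch's own hunks
rather than introducing any new API:

	/* dma_map_single(): caller has a kernel-virtual pointer. */
	IOMMU_BUG_ON(range_straddles_page_boundary(__pa(ptr), size));

	/* dma_map_sg(): caller has only a page + offset. */
	IOMMU_BUG_ON(range_straddles_page_boundary(
		page_to_pseudophys(sg[i].page) + sg[i].offset,
		sg[i].length));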