|
32 | 32 | * payload.
|
33 | 33 | * One regular LZO compressed extent can have one or more segments.
|
34 | 34 | * For inlined LZO compressed extent, only one segment is allowed.
|
35 |
| - * One segment represents at most one page of uncompressed data. |
| 35 | + * One segment represents at most one sector of uncompressed data. |
36 | 36 | *
|
37 | 37 | * 2.1 Segment header
|
38 | 38 | * Fixed size. LZO_LEN (4) bytes long, LE32.
|
39 | 39 | * Records the total size of the segment (not including the header).
|
40 |
| - * Segment header never crosses page boundary, thus it's possible to |
41 |
| - * have at most 3 padding zeros at the end of the page. |
| 40 | + * Segment header never crosses sector boundary, thus it's possible to |
| 41 | + * have at most 3 padding zeros at the end of the sector. |
42 | 42 | *
|
43 | 43 | * 2.2 Data Payload
|
44 |
| - * Variable size. Size up limit should be lzo1x_worst_compress(PAGE_SIZE) |
45 |
| - * which is 4419 for a 4KiB page. |
| 44 | + * Variable size. The upper size limit is lzo1x_worst_compress(sectorsize), |
| 45 | + * which is 4419 for a 4KiB sectorsize. |
46 | 46 | *
|
47 |
| - * Example: |
| 47 | + * Example with 4K sectorsize: |
48 | 48 | * Page 1:
|
49 | 49 | * 0 0x2 0x4 0x6 0x8 0xa 0xc 0xe 0x10
|
50 | 50 | * 0x0000 | Header | SegHdr 01 | Data payload 01 ... |
|
@@ -112,163 +112,161 @@ static inline size_t read_compress_length(const char *buf)
|
112 | 112 | return le32_to_cpu(dlen);
|
113 | 113 | }
|
114 | 114 |
|
| 115 | +/* |
| 116 | + * Will do: |
| 117 | + * |
| 118 | + * - Write a segment header into the destination |
| 119 | + * - Copy the compressed buffer into the destination |
| 120 | + * - Make sure we have enough space in the last sector to fit a segment header |
| 121 | + * If not, we will pad at most (LZO_LEN (4)) - 1 bytes of zeros. |
| 122 | + * |
| 123 | + * Will allocate new pages when needed. |
| 124 | + */ |
| 125 | +static int copy_compressed_data_to_page(char *compressed_data, |
| 126 | + size_t compressed_size, |
| 127 | + struct page **out_pages, |
| 128 | + u32 *cur_out, |
| 129 | + const u32 sectorsize) |
| 130 | +{ |
| 131 | + u32 sector_bytes_left; |
| 132 | + u32 orig_out; |
| 133 | + struct page *cur_page; |
| 134 | + |
| 135 | + /* |
| 136 | + * We never allow a segment header crossing sector boundary, previous |
| 137 | + * run should ensure we have enough space left inside the sector. |
| 138 | + */ |
| 139 | + ASSERT((*cur_out / sectorsize) == (*cur_out + LZO_LEN - 1) / sectorsize); |
| 140 | + |
| 141 | + cur_page = out_pages[*cur_out / PAGE_SIZE]; |
| 142 | + /* Allocate a new page */ |
| 143 | + if (!cur_page) { |
| 144 | + cur_page = alloc_page(GFP_NOFS); |
| 145 | + if (!cur_page) |
| 146 | + return -ENOMEM; |
| 147 | + out_pages[*cur_out / PAGE_SIZE] = cur_page; |
| 148 | + } |
| 149 | + |
| 150 | + write_compress_length(page_address(cur_page) + offset_in_page(*cur_out), |
| 151 | + compressed_size); |
| 152 | + *cur_out += LZO_LEN; |
| 153 | + |
| 154 | + orig_out = *cur_out; |
| 155 | + |
| 156 | + /* Copy compressed data */ |
| 157 | + while (*cur_out - orig_out < compressed_size) { |
| 158 | + u32 copy_len = min_t(u32, sectorsize - *cur_out % sectorsize, |
| 159 | + orig_out + compressed_size - *cur_out); |
| 160 | + |
| 161 | + cur_page = out_pages[*cur_out / PAGE_SIZE]; |
| 162 | + /* Allocate a new page */ |
| 163 | + if (!cur_page) { |
| 164 | + cur_page = alloc_page(GFP_NOFS); |
| 165 | + if (!cur_page) |
| 166 | + return -ENOMEM; |
| 167 | + out_pages[*cur_out / PAGE_SIZE] = cur_page; |
| 168 | + } |
| 169 | + |
| 170 | + memcpy(page_address(cur_page) + offset_in_page(*cur_out), |
| 171 | + compressed_data + *cur_out - orig_out, copy_len); |
| 172 | + |
| 173 | + *cur_out += copy_len; |
| 174 | + } |
| 175 | + |
| 176 | + /* |
| 177 | + * Check if we can fit the next segment header into the remaining space |
| 178 | + * of the sector. |
| 179 | + */ |
| 180 | + sector_bytes_left = round_up(*cur_out, sectorsize) - *cur_out; |
| 181 | + if (sector_bytes_left >= LZO_LEN || sector_bytes_left == 0) |
| 182 | + return 0; |
| 183 | + |
| 184 | + /* The remaining size is not enough, pad it with zeros */ |
| 185 | + memset(page_address(cur_page) + offset_in_page(*cur_out), 0, |
| 186 | + sector_bytes_left); |
| 187 | + *cur_out += sector_bytes_left; |
| 188 | + return 0; |
| 189 | +} |
| 190 | + |
115 | 191 | int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
|
116 | 192 | u64 start, struct page **pages, unsigned long *out_pages,
|
117 | 193 | unsigned long *total_in, unsigned long *total_out)
|
118 | 194 | {
|
119 | 195 | struct workspace *workspace = list_entry(ws, struct workspace, list);
|
| 196 | + const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize; |
| 197 | + struct page *page_in = NULL; |
120 | 198 | int ret = 0;
|
121 |
| - char *data_in; |
122 |
| - char *cpage_out, *sizes_ptr; |
123 |
| - int nr_pages = 0; |
124 |
| - struct page *in_page = NULL; |
125 |
| - struct page *out_page = NULL; |
126 |
| - unsigned long bytes_left; |
127 |
| - unsigned long len = *total_out; |
128 |
| - unsigned long nr_dest_pages = *out_pages; |
129 |
| - const unsigned long max_out = nr_dest_pages * PAGE_SIZE; |
130 |
| - size_t in_len; |
131 |
| - size_t out_len; |
132 |
| - char *buf; |
133 |
| - unsigned long tot_in = 0; |
134 |
| - unsigned long tot_out = 0; |
135 |
| - unsigned long pg_bytes_left; |
136 |
| - unsigned long out_offset; |
137 |
| - unsigned long bytes; |
| 199 | + /* Points to the file offset of input data */ |
| 200 | + u64 cur_in = start; |
| 201 | + /* Points to the current output byte */ |
| 202 | + u32 cur_out = 0; |
| 203 | + u32 len = *total_out; |
138 | 204 |
|
139 | 205 | *out_pages = 0;
|
140 | 206 | *total_out = 0;
|
141 | 207 | *total_in = 0;
|
142 | 208 |
|
143 |
| - in_page = find_get_page(mapping, start >> PAGE_SHIFT); |
144 |
| - data_in = page_address(in_page); |
145 |
| - |
146 | 209 | /*
|
147 |
| - * store the size of all chunks of compressed data in |
148 |
| - * the first 4 bytes |
| 210 | + * Skip the header for now, we will later come back and write the total |
| 211 | + * compressed size |
149 | 212 | */
|
150 |
| - out_page = alloc_page(GFP_NOFS); |
151 |
| - if (out_page == NULL) { |
152 |
| - ret = -ENOMEM; |
153 |
| - goto out; |
154 |
| - } |
155 |
| - cpage_out = page_address(out_page); |
156 |
| - out_offset = LZO_LEN; |
157 |
| - tot_out = LZO_LEN; |
158 |
| - pages[0] = out_page; |
159 |
| - nr_pages = 1; |
160 |
| - pg_bytes_left = PAGE_SIZE - LZO_LEN; |
161 |
| - |
162 |
| - /* compress at most one page of data each time */ |
163 |
| - in_len = min(len, PAGE_SIZE); |
164 |
| - while (tot_in < len) { |
165 |
| - ret = lzo1x_1_compress(data_in, in_len, workspace->cbuf, |
166 |
| - &out_len, workspace->mem); |
167 |
| - if (ret != LZO_E_OK) { |
168 |
| - pr_debug("BTRFS: lzo in loop returned %d\n", |
169 |
| - ret); |
| 213 | + cur_out += LZO_LEN; |
| 214 | + while (cur_in < start + len) { |
| 215 | + const u32 sectorsize_mask = sectorsize - 1; |
| 216 | + u32 sector_off = (cur_in - start) & sectorsize_mask; |
| 217 | + u32 in_len; |
| 218 | + size_t out_len; |
| 219 | + |
| 220 | + /* Get the input page first */ |
| 221 | + if (!page_in) { |
| 222 | + page_in = find_get_page(mapping, cur_in >> PAGE_SHIFT); |
| 223 | + ASSERT(page_in); |
| 224 | + } |
| 225 | + |
| 226 | + /* Compress at most one sector of data each time */ |
| 227 | + in_len = min_t(u32, start + len - cur_in, sectorsize - sector_off); |
| 228 | + ASSERT(in_len); |
| 229 | + ret = lzo1x_1_compress(page_address(page_in) + |
| 230 | + offset_in_page(cur_in), in_len, |
| 231 | + workspace->cbuf, &out_len, |
| 232 | + workspace->mem); |
| 233 | + if (ret < 0) { |
| 234 | + pr_debug("BTRFS: lzo in loop returned %d\n", ret); |
170 | 235 | ret = -EIO;
|
171 | 236 | goto out;
|
172 | 237 | }
|
173 | 238 |
|
174 |
| - /* store the size of this chunk of compressed data */ |
175 |
| - write_compress_length(cpage_out + out_offset, out_len); |
176 |
| - tot_out += LZO_LEN; |
177 |
| - out_offset += LZO_LEN; |
178 |
| - pg_bytes_left -= LZO_LEN; |
179 |
| - |
180 |
| - tot_in += in_len; |
181 |
| - tot_out += out_len; |
182 |
| - |
183 |
| - /* copy bytes from the working buffer into the pages */ |
184 |
| - buf = workspace->cbuf; |
185 |
| - while (out_len) { |
186 |
| - bytes = min_t(unsigned long, pg_bytes_left, out_len); |
187 |
| - |
188 |
| - memcpy(cpage_out + out_offset, buf, bytes); |
189 |
| - |
190 |
| - out_len -= bytes; |
191 |
| - pg_bytes_left -= bytes; |
192 |
| - buf += bytes; |
193 |
| - out_offset += bytes; |
194 |
| - |
195 |
| - /* |
196 |
| - * we need another page for writing out. |
197 |
| - * |
198 |
| - * Note if there's less than 4 bytes left, we just |
199 |
| - * skip to a new page. |
200 |
| - */ |
201 |
| - if ((out_len == 0 && pg_bytes_left < LZO_LEN) || |
202 |
| - pg_bytes_left == 0) { |
203 |
| - if (pg_bytes_left) { |
204 |
| - memset(cpage_out + out_offset, 0, |
205 |
| - pg_bytes_left); |
206 |
| - tot_out += pg_bytes_left; |
207 |
| - } |
208 |
| - |
209 |
| - /* we're done, don't allocate new page */ |
210 |
| - if (out_len == 0 && tot_in >= len) |
211 |
| - break; |
212 |
| - |
213 |
| - if (nr_pages == nr_dest_pages) { |
214 |
| - out_page = NULL; |
215 |
| - ret = -E2BIG; |
216 |
| - goto out; |
217 |
| - } |
218 |
| - |
219 |
| - out_page = alloc_page(GFP_NOFS); |
220 |
| - if (out_page == NULL) { |
221 |
| - ret = -ENOMEM; |
222 |
| - goto out; |
223 |
| - } |
224 |
| - cpage_out = page_address(out_page); |
225 |
| - pages[nr_pages++] = out_page; |
226 |
| - |
227 |
| - pg_bytes_left = PAGE_SIZE; |
228 |
| - out_offset = 0; |
229 |
| - } |
230 |
| - } |
| 239 | + ret = copy_compressed_data_to_page(workspace->cbuf, out_len, |
| 240 | + pages, &cur_out, sectorsize); |
| 241 | + if (ret < 0) |
| 242 | + goto out; |
| 243 | + |
| 244 | + cur_in += in_len; |
231 | 245 |
|
232 |
| - /* we're making it bigger, give up */ |
233 |
| - if (tot_in > 8192 && tot_in < tot_out) { |
| 246 | + /* |
| 247 | + * Check if we're making it bigger after two sectors. And if |
| 248 | + * it is so, give up. |
| 249 | + */ |
| 250 | + if (cur_in - start > sectorsize * 2 && cur_in - start < cur_out) { |
234 | 251 | ret = -E2BIG;
|
235 | 252 | goto out;
|
236 | 253 | }
|
237 | 254 |
|
238 |
| - /* we're all done */ |
239 |
| - if (tot_in >= len) |
240 |
| - break; |
241 |
| - |
242 |
| - if (tot_out > max_out) |
243 |
| - break; |
244 |
| - |
245 |
| - bytes_left = len - tot_in; |
246 |
| - put_page(in_page); |
247 |
| - |
248 |
| - start += PAGE_SIZE; |
249 |
| - in_page = find_get_page(mapping, start >> PAGE_SHIFT); |
250 |
| - data_in = page_address(in_page); |
251 |
| - in_len = min(bytes_left, PAGE_SIZE); |
252 |
| - } |
253 |
| - |
254 |
| - if (tot_out >= tot_in) { |
255 |
| - ret = -E2BIG; |
256 |
| - goto out; |
| 255 | + /* Check if we have reached page boundary */ |
| 256 | + if (IS_ALIGNED(cur_in, PAGE_SIZE)) { |
| 257 | + put_page(page_in); |
| 258 | + page_in = NULL; |
| 259 | + } |
257 | 260 | }
|
258 | 261 |
|
259 |
| - /* store the size of all chunks of compressed data */ |
260 |
| - sizes_ptr = page_address(pages[0]); |
261 |
| - write_compress_length(sizes_ptr, tot_out); |
| 262 | + /* Store the size of all chunks of compressed data */ |
| 263 | + write_compress_length(page_address(pages[0]), cur_out); |
262 | 264 |
|
263 | 265 | ret = 0;
|
264 |
| - *total_out = tot_out; |
265 |
| - *total_in = tot_in; |
| 266 | + *total_out = cur_out; |
| 267 | + *total_in = cur_in - start; |
266 | 268 | out:
|
267 |
| - *out_pages = nr_pages; |
268 |
| - |
269 |
| - if (in_page) |
270 |
| - put_page(in_page); |
271 |
| - |
| 269 | + *out_pages = DIV_ROUND_UP(cur_out, PAGE_SIZE); |
272 | 270 | return ret;
|
273 | 271 | }
|
274 | 272 |
|
|
0 commit comments