 #include <linux/refcount.h>
 #include "compression.h"
 
+/* workspace buffer size for s390 zlib hardware support */
+#define ZLIB_DFLTCC_BUF_SIZE	(4 * PAGE_SIZE)
+
 struct workspace {
 	z_stream strm;
 	char *buf;
+	unsigned int buf_size;
 	struct list_head list;
 	int level;
 };
@@ -61,7 +65,21 @@ struct list_head *zlib_alloc_workspace(unsigned int level)
 			zlib_inflate_workspacesize());
 	workspace->strm.workspace = kvmalloc(workspacesize, GFP_KERNEL);
 	workspace->level = level;
-	workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	workspace->buf = NULL;
+	/*
+	 * In case of s390 zlib hardware support, allocate a larger workspace
+	 * buffer. If the allocation fails, fall back to a single page buffer.
+	 */
+	if (zlib_deflate_dfltcc_enabled()) {
+		workspace->buf = kmalloc(ZLIB_DFLTCC_BUF_SIZE,
+					 __GFP_NOMEMALLOC | __GFP_NORETRY |
+					 __GFP_NOWARN | GFP_NOIO);
+		workspace->buf_size = ZLIB_DFLTCC_BUF_SIZE;
+	}
+	if (!workspace->buf) {
+		workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+		workspace->buf_size = PAGE_SIZE;
+	}
 	if (!workspace->strm.workspace || !workspace->buf)
 		goto fail;
 
@@ -85,6 +103,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 	struct page *in_page = NULL;
 	struct page *out_page = NULL;
 	unsigned long bytes_left;
+	unsigned int in_buf_pages;
 	unsigned long len = *total_out;
 	unsigned long nr_dest_pages = *out_pages;
 	const unsigned long max_out = nr_dest_pages * PAGE_SIZE;
@@ -102,9 +121,6 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 	workspace->strm.total_in = 0;
 	workspace->strm.total_out = 0;
 
-	in_page = find_get_page(mapping, start >> PAGE_SHIFT);
-	data_in = kmap(in_page);
-
 	out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
 	if (out_page == NULL) {
 		ret = -ENOMEM;
@@ -114,12 +130,51 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 	pages[0] = out_page;
 	nr_pages = 1;
 
-	workspace->strm.next_in = data_in;
+	workspace->strm.next_in = workspace->buf;
+	workspace->strm.avail_in = 0;
 	workspace->strm.next_out = cpage_out;
 	workspace->strm.avail_out = PAGE_SIZE;
-	workspace->strm.avail_in = min(len, PAGE_SIZE);
 
 	while (workspace->strm.total_in < len) {
+		/*
+		 * Get next input pages and copy the contents to
+		 * the workspace buffer if required.
+		 */
+		if (workspace->strm.avail_in == 0) {
+			bytes_left = len - workspace->strm.total_in;
+			in_buf_pages = min(DIV_ROUND_UP(bytes_left, PAGE_SIZE),
+					   workspace->buf_size / PAGE_SIZE);
+			if (in_buf_pages > 1) {
+				int i;
+
+				for (i = 0; i < in_buf_pages; i++) {
+					if (in_page) {
+						kunmap(in_page);
+						put_page(in_page);
+					}
+					in_page = find_get_page(mapping,
+								start >> PAGE_SHIFT);
+					data_in = kmap(in_page);
+					memcpy(workspace->buf + i * PAGE_SIZE,
+					       data_in, PAGE_SIZE);
+					start += PAGE_SIZE;
+				}
+				workspace->strm.next_in = workspace->buf;
+			} else {
+				if (in_page) {
+					kunmap(in_page);
+					put_page(in_page);
+				}
+				in_page = find_get_page(mapping,
+							start >> PAGE_SHIFT);
+				data_in = kmap(in_page);
+				start += PAGE_SIZE;
+				workspace->strm.next_in = data_in;
+			}
+			workspace->strm.avail_in = min(bytes_left,
+					(unsigned long)workspace->buf_size);
+		}
+
 		ret = zlib_deflate(&workspace->strm, Z_SYNC_FLUSH);
 		if (ret != Z_OK) {
 			pr_debug("BTRFS: deflate in loop returned %d\n",
@@ -161,33 +216,43 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
 		/* we're all done */
 		if (workspace->strm.total_in >= len)
 			break;
-
-		/* we've read in a full page, get a new one */
-		if (workspace->strm.avail_in == 0) {
-			if (workspace->strm.total_out > max_out)
-				break;
-
-			bytes_left = len - workspace->strm.total_in;
-			kunmap(in_page);
-			put_page(in_page);
-
-			start += PAGE_SIZE;
-			in_page = find_get_page(mapping,
-						start >> PAGE_SHIFT);
-			data_in = kmap(in_page);
-			workspace->strm.avail_in = min(bytes_left,
-						       PAGE_SIZE);
-			workspace->strm.next_in = data_in;
-		}
+		if (workspace->strm.total_out > max_out)
+			break;
 	}
 	workspace->strm.avail_in = 0;
-	ret = zlib_deflate(&workspace->strm, Z_FINISH);
-	zlib_deflateEnd(&workspace->strm);
-
-	if (ret != Z_STREAM_END) {
-		ret = -EIO;
-		goto out;
+	/*
+	 * Call deflate with Z_FINISH flush parameter providing more output
+	 * space but no more input data, until it returns with Z_STREAM_END.
+	 */
+	while (ret != Z_STREAM_END) {
+		ret = zlib_deflate(&workspace->strm, Z_FINISH);
+		if (ret == Z_STREAM_END)
+			break;
+		if (ret != Z_OK && ret != Z_BUF_ERROR) {
+			zlib_deflateEnd(&workspace->strm);
+			ret = -EIO;
+			goto out;
+		} else if (workspace->strm.avail_out == 0) {
+			/* get another page for the stream end */
+			kunmap(out_page);
+			if (nr_pages == nr_dest_pages) {
+				out_page = NULL;
+				ret = -E2BIG;
+				goto out;
+			}
+			out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+			if (out_page == NULL) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			cpage_out = kmap(out_page);
+			pages[nr_pages] = out_page;
+			nr_pages++;
+			workspace->strm.avail_out = PAGE_SIZE;
+			workspace->strm.next_out = cpage_out;
+		}
 	}
+	zlib_deflateEnd(&workspace->strm);
 
 	if (workspace->strm.total_out >= workspace->strm.total_in) {
 		ret = -E2BIG;
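The Z_FINISH loop added above relies on a zlib convention worth spelling out: once all input has been handed over, deflate() must be called repeatedly with Z_FINISH and fresh output space until it reports Z_STREAM_END, and a Z_BUF_ERROR along the way only means "no room to make progress", not a hard failure. A minimal userspace sketch of that drain loop (again not btrfs code; OUT_CHUNK and the write_page() callback are hypothetical) could look like this:

#include <stddef.h>
#include <zlib.h>

#define OUT_CHUNK 4096		/* one output "page" per pass */

/*
 * Finish an already-fed deflate stream: hand zlib a fresh output
 * buffer on every pass until it returns Z_STREAM_END, treating
 * Z_BUF_ERROR as "need more room" rather than as a failure.
 */
static int deflate_finish(z_stream *strm,
			  int (*write_page)(const void *buf, size_t len))
{
	unsigned char out[OUT_CHUNK];
	int ret = Z_OK;

	strm->avail_in = 0;	/* no more input, only flushing */
	while (ret != Z_STREAM_END) {
		strm->next_out = out;
		strm->avail_out = sizeof(out);
		ret = deflate(strm, Z_FINISH);
		if (ret != Z_OK && ret != Z_BUF_ERROR && ret != Z_STREAM_END)
			return -1;	/* hard error, e.g. Z_STREAM_ERROR */
		if (write_page(out, sizeof(out) - strm->avail_out))
			return -1;	/* destination is full */
	}
	return deflateEnd(strm) == Z_OK ? 0 : -1;
}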
@@ -231,7 +296,7 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 
 	workspace->strm.total_out = 0;
 	workspace->strm.next_out = workspace->buf;
-	workspace->strm.avail_out = PAGE_SIZE;
+	workspace->strm.avail_out = workspace->buf_size;
 
 	/* If it's deflate, and it's got no preset dictionary, then
 	   we can tell zlib to skip the adler32 check. */
@@ -270,7 +335,7 @@ int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 	}
 
 	workspace->strm.next_out = workspace->buf;
-	workspace->strm.avail_out = PAGE_SIZE;
+	workspace->strm.avail_out = workspace->buf_size;
 
 	if (workspace->strm.avail_in == 0) {
 		unsigned long tmp;
@@ -320,7 +385,7 @@ int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 	workspace->strm.total_in = 0;
 
 	workspace->strm.next_out = workspace->buf;
-	workspace->strm.avail_out = PAGE_SIZE;
+	workspace->strm.avail_out = workspace->buf_size;
 	workspace->strm.total_out = 0;
 	/* If it's deflate, and it's got no preset dictionary, then
 	   we can tell zlib to skip the adler32 check. */
@@ -364,7 +429,7 @@ int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 		buf_offset = 0;
 
 		bytes = min(PAGE_SIZE - pg_offset,
-			    PAGE_SIZE - buf_offset);
+			    PAGE_SIZE - (buf_offset % PAGE_SIZE));
 		bytes = min(bytes, bytes_left);
 
 		kaddr = kmap_atomic(dest_page);
@@ -375,7 +440,7 @@ int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 		bytes_left -= bytes;
 next:
 		workspace->strm.next_out = workspace->buf;
-		workspace->strm.avail_out = PAGE_SIZE;
+		workspace->strm.avail_out = workspace->buf_size;
 	}
 
 	if (ret != Z_STREAM_END && bytes_left != 0)