@@ -191,21 +191,21 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio)
 bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
 {
 	loff_t pos = folio_pos(folio);
+	size_t size = folio_size(folio);
 	unsigned int offset, length;
 	struct page *split_at, *split_at2;
 
 	if (pos < start)
 		offset = start - pos;
 	else
 		offset = 0;
-	length = folio_size(folio);
-	if (pos + length <= (u64)end)
-		length = length - offset;
+	if (pos + size <= (u64)end)
+		length = size - offset;
 	else
 		length = end + 1 - pos - offset;
 
 	folio_wait_writeback(folio);
-	if (length == folio_size(folio)) {
+	if (length == size) {
 		truncate_inode_folio(folio->mapping, folio);
 		return true;
 	}
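The hunk above caches folio_size() in a local and computes which byte range of the folio falls inside [start, end]. A minimal standalone sketch of that arithmetic, with made-up example values (a 64 KiB folio at file position 0, truncating bytes 4096..20479) — this is userspace illustration code, not part of the kernel patch:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

int main(void)
{
	int64_t pos = 0;                   /* folio_pos(): folio's byte offset in the file */
	size_t size = 65536;               /* folio_size(): a 64 KiB folio */
	int64_t start = 4096, end = 20479; /* hypothetical truncation range */
	unsigned int offset, length;

	/* Offset of the truncated range within the folio. */
	offset = pos < start ? start - pos : 0;            /* 4096 */

	if ((uint64_t)(pos + size) <= (uint64_t)end)
		length = size - offset;     /* range covers the folio's tail */
	else
		length = end + 1 - pos - offset;           /* 16384 here */

	printf("partial truncate: %u bytes at folio offset %u\n",
	       length, offset);
	return 0;
}

When length works out to the whole folio, the real function instead drops the folio entirely via truncate_inode_folio(), as the diff shows.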
@@ -224,16 +224,20 @@ bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end)
 		return true;
 
 	split_at = folio_page(folio, PAGE_ALIGN_DOWN(offset) / PAGE_SIZE);
-	split_at2 = folio_page(folio,
-			PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE);
-
 	if (!try_folio_split(folio, split_at, NULL)) {
 		/*
 		 * try to split at offset + length to make sure folios within
 		 * the range can be dropped, especially to avoid memory waste
 		 * for shmem truncate
 		 */
-		struct folio *folio2 = page_folio(split_at2);
+		struct folio *folio2;
+
+		if (offset + length == size)
+			goto no_split;
+
+		split_at2 = folio_page(folio,
+				PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE);
+		folio2 = page_folio(split_at2);
 
 		if (!folio_try_get(folio2))
 			goto no_split;
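This hunk defers computing split_at2 until it is actually needed and bails out first when offset + length == size, i.e. when the truncated range runs to the very end of the folio. In that case PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE evaluates to an index one past the folio's last page, so there is nothing useful to split at. A standalone sketch of the skip condition, again with hypothetical example values rather than kernel code:

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096u
#define PAGE_ALIGN_DOWN(x) ((x) & ~(PAGE_SIZE - 1))

int main(void)
{
	size_t size = 65536;                 /* folio_size(): 16 pages */
	unsigned int offset = 4096;          /* truncation starts at page 1 */
	unsigned int length = size - offset; /* range reaches the folio end */

	if (offset + length == size) {
		/* Second split point would be page index 16 of a
		 * 16-page folio (valid indices 0..15): skip it. */
		printf("range ends at folio end: no second split\n");
		return 0;
	}
	printf("second split at page index %u\n",
	       PAGE_ALIGN_DOWN(offset + length) / PAGE_SIZE);
	return 0;
}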