@@ -207,12 +207,9 @@ int dax_zero_range(struct inode *inode, loff_t pos, loff_t len, bool *did_zero,
 int dax_truncate_page(struct inode *inode, loff_t pos, bool *did_zero,
 		const struct iomap_ops *ops);
 
-static inline int dax_wait_page_idle(struct page *page,
-				void (cb)(struct inode *),
-				struct inode *inode)
+static inline bool dax_page_is_idle(struct page *page)
 {
-	return ___wait_var_event(page, page_ref_count(page) == 1,
-				TASK_INTERRUPTIBLE, 0, 0, cb(inode));
+	return page && page_ref_count(page) == 1;
 }
 
 #if IS_ENABLED(CONFIG_DAX)
@@ -228,6 +225,15 @@ static inline void dax_read_unlock(int id)
 {
 }
 #endif /* CONFIG_DAX */
+
+#if !IS_ENABLED(CONFIG_FS_DAX)
+static inline int __must_check dax_break_layout(struct inode *inode,
+		loff_t start, loff_t end, void (cb)(struct inode *))
+{
+	return 0;
+}
+#endif
+
 bool dax_alive(struct dax_device *dax_dev);
 void *dax_get_private(struct dax_device *dax_dev);
 long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
@@ -251,6 +257,13 @@ vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
 		pgoff_t index);
+int __must_check dax_break_layout(struct inode *inode, loff_t start,
+		loff_t end, void (cb)(struct inode *));
+static inline int __must_check dax_break_layout_inode(struct inode *inode,
+		void (cb)(struct inode *))
+{
+	return dax_break_layout(inode, 0, LLONG_MAX, cb);
+}
 int dax_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
 		struct inode *dest, loff_t destoff,
 		loff_t len, bool *is_same,
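
For context, a minimal usage sketch of the new interface (not part of this patch): a filesystem's truncate path would call dax_break_layout_inode() with a filesystem-specific callback to drain outstanding DAX page references before freeing blocks, with dax_page_is_idle() being the underlying check (page_ref_count() == 1). The function names example_wait_dax_page() and example_truncate_begin() below are hypothetical, and a real filesystem would manage its own locking around the wait.

#include <linux/fs.h>
#include <linux/dax.h>

/*
 * Hypothetical callback invoked while dax_break_layout() sleeps; a real
 * filesystem would drop and re-take its own locks here so the holder of
 * the extra page reference can make progress.
 */
static void example_wait_dax_page(struct inode *inode)
{
}

static int example_truncate_begin(struct inode *inode)
{
	int error;

	/*
	 * Wait until every DAX page backing this inode is idle before any
	 * blocks are freed; returns an error if the wait is interrupted.
	 */
	error = dax_break_layout_inode(inode, example_wait_dax_page);
	if (error)
		return error;

	/* Safe to proceed with the truncation itself. */
	return 0;
}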