Re: [PATCH v2] fs: dax: Adding new return type vm_fault_t

From: kbuild test robot
Date: Mon Apr 23 2018 - 03:41:09 EST


Hi Souptick,

Thank you for the patch! There is still something to improve:

[auto build test ERROR on linus/master]
[also build test ERROR on v4.17-rc2 next-20180420]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url: https://github.com/0day-ci/linux/commits/Souptick-Joarder/fs-dax-Adding-new-return-type-vm_fault_t/20180423-102814
config: x86_64-randconfig-u0-04230854 (attached as .config)
compiler: gcc-5 (Debian 5.5.0-3) 5.4.1 20171010
reproduce:
        # save the attached .config to linux build tree
        make ARCH=x86_64

All errors (new ones prefixed by >>):

   fs/dax.c: In function 'dax_iomap_pte_fault':
>> fs/dax.c:1265:10: error: implicit declaration of function 'vmf_insert_mixed_mkwrite' [-Werror=implicit-function-declaration]
      ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
            ^
   cc1: some warnings being treated as errors
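
The "implicit declaration" error means no prototype for vmf_insert_mixed_mkwrite() is visible when fs/dax.c is compiled, so gcc assumes an implicit int-returning function and -Werror promotes the warning to a hard error. Most likely this patch depends on a separate mm change introducing that helper, which has to be applied (or folded in) first. As a sketch only, the declaration that would need to be in scope, presumably from include/linux/mm.h (the exact location and hunk are assumptions, not the upstream change):

        /* sketch: prototype that must be visible to fs/dax.c; location assumed */
        vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
                                            unsigned long addr, pfn_t pfn);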

vim +/vmf_insert_mixed_mkwrite +1265 fs/dax.c

  1134
  1135  static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
  1136                  int *iomap_errp, const struct iomap_ops *ops)
  1137  {
  1138          struct vm_area_struct *vma = vmf->vma;
  1139          struct address_space *mapping = vma->vm_file->f_mapping;
  1140          struct inode *inode = mapping->host;
  1141          unsigned long vaddr = vmf->address;
  1142          loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
  1143          struct iomap iomap = { 0 };
  1144          unsigned flags = IOMAP_FAULT;
  1145          int error, major = 0;
  1146          bool write = vmf->flags & FAULT_FLAG_WRITE;
  1147          bool sync;
  1148          vm_fault_t ret = 0;
  1149          void *entry;
  1150          pfn_t pfn;
  1151
  1152          trace_dax_pte_fault(inode, vmf, ret);
  1153          /*
  1154           * Check whether offset isn't beyond end of file now. Caller is supposed
  1155           * to hold locks serializing us with truncate / punch hole so this is
  1156           * a reliable test.
  1157           */
  1158          if (pos >= i_size_read(inode)) {
  1159                  ret = VM_FAULT_SIGBUS;
  1160                  goto out;
  1161          }
  1162
  1163          if (write && !vmf->cow_page)
  1164                  flags |= IOMAP_WRITE;
  1165
  1166          entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
  1167          if (IS_ERR(entry)) {
  1168                  ret = dax_fault_return(PTR_ERR(entry));
  1169                  goto out;
  1170          }
  1171
  1172          /*
  1173           * It is possible, particularly with mixed reads & writes to private
  1174           * mappings, that we have raced with a PMD fault that overlaps with
  1175           * the PTE we need to set up. If so just return and the fault will be
  1176           * retried.
  1177           */
  1178          if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
  1179                  ret = VM_FAULT_NOPAGE;
  1180                  goto unlock_entry;
  1181          }
  1182
  1183          /*
  1184           * Note that we don't bother to use iomap_apply here: DAX required
  1185           * the file system block size to be equal the page size, which means
  1186           * that we never have to deal with more than a single extent here.
  1187           */
  1188          error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
  1189          if (iomap_errp)
  1190                  *iomap_errp = error;
  1191          if (error) {
  1192                  ret = dax_fault_return(error);
  1193                  goto unlock_entry;
  1194          }
  1195          if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
  1196                  error = -EIO;   /* fs corruption? */
  1197                  goto error_finish_iomap;
  1198          }
  1199
  1200          if (vmf->cow_page) {
  1201                  sector_t sector = dax_iomap_sector(&iomap, pos);
  1202
  1203                  switch (iomap.type) {
  1204                  case IOMAP_HOLE:
  1205                  case IOMAP_UNWRITTEN:
  1206                          clear_user_highpage(vmf->cow_page, vaddr);
  1207                          break;
  1208                  case IOMAP_MAPPED:
  1209                          error = copy_user_dax(iomap.bdev, iomap.dax_dev,
  1210                                          sector, PAGE_SIZE, vmf->cow_page, vaddr);
  1211                          break;
  1212                  default:
  1213                          WARN_ON_ONCE(1);
  1214                          error = -EIO;
  1215                          break;
  1216                  }
  1217
  1218                  if (error)
  1219                          goto error_finish_iomap;
  1220
  1221                  __SetPageUptodate(vmf->cow_page);
  1222                  ret = finish_fault(vmf);
  1223                  if (!ret)
  1224                          ret = VM_FAULT_DONE_COW;
  1225                  goto finish_iomap;
  1226          }
  1227
  1228          sync = dax_fault_is_synchronous(flags, vma, &iomap);
  1229
  1230          switch (iomap.type) {
  1231          case IOMAP_MAPPED:
  1232                  if (iomap.flags & IOMAP_F_NEW) {
  1233                          count_vm_event(PGMAJFAULT);
  1234                          count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
  1235                          major = VM_FAULT_MAJOR;
  1236                  }
  1237                  error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
  1238                  if (error < 0)
  1239                          goto error_finish_iomap;
  1240
  1241                  entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
  1242                                                   0, write && !sync);
  1243                  if (IS_ERR(entry)) {
  1244                          error = PTR_ERR(entry);
  1245                          goto error_finish_iomap;
  1246                  }
  1247
  1248                  /*
  1249                   * If we are doing synchronous page fault and inode needs fsync,
  1250                   * we can insert PTE into page tables only after that happens.
  1251                   * Skip insertion for now and return the pfn so that caller can
  1252                   * insert it after fsync is done.
  1253                   */
  1254                  if (sync) {
  1255                          if (WARN_ON_ONCE(!pfnp)) {
  1256                                  error = -EIO;
  1257                                  goto error_finish_iomap;
  1258                          }
  1259                          *pfnp = pfn;
  1260                          ret = VM_FAULT_NEEDDSYNC | major;
  1261                          goto finish_iomap;
  1262                  }
  1263                  trace_dax_insert_mapping(inode, vmf, entry);
  1264                  if (write)
> 1265                          ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
  1266                  else
  1267                          ret = vmf_insert_mixed(vma, vaddr, pfn);
  1268
  1269                  goto finish_iomap;
  1270          case IOMAP_UNWRITTEN:
  1271          case IOMAP_HOLE:
  1272                  if (!write) {
  1273                          ret = dax_load_hole(mapping, entry, vmf);
  1274                          goto finish_iomap;
  1275                  }
  1276                  /*FALLTHRU*/
  1277          default:
  1278                  WARN_ON_ONCE(1);
  1279                  error = -EIO;
  1280                  break;
  1281          }
  1282
  1283   error_finish_iomap:
  1284          ret = dax_fault_return(error) | major;
  1285   finish_iomap:
  1286          if (ops->iomap_end) {
  1287                  int copied = PAGE_SIZE;
  1288
  1289                  if (ret & VM_FAULT_ERROR)
  1290                          copied = 0;
  1291                  /*
  1292                   * The fault is done by now and there's no way back (other
  1293                   * thread may be already happily using PTE we have installed).
  1294                   * Just ignore error from ->iomap_end since we cannot do much
  1295                   * with it.
  1296                   */
  1297                  ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
  1298          }
  1299   unlock_entry:
  1300          put_locked_mapping_entry(mapping, vmf->pgoff);
  1301   out:
  1302          trace_dax_pte_fault_done(inode, vmf, ret);
  1303          return ret;
  1304  }
  1305
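
For reference, the call at line 1265 only builds once a vm_fault_t-returning helper exists. A minimal sketch of what such a helper might look like, assuming it wraps the existing int-returning vm_insert_mixed_mkwrite() and translates its errno result to VM_FAULT_* codes, treating -EBUSY (a concurrent fault on the same PTE) as success; the actual mm patch may differ:

        /* Hypothetical wrapper, not the upstream change: assumes the
         * int-returning vm_insert_mixed_mkwrite() already in mm/memory.c. */
        vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
                                            unsigned long addr, pfn_t pfn)
        {
                int err = vm_insert_mixed_mkwrite(vma, addr, pfn);

                if (err == -ENOMEM)
                        return VM_FAULT_OOM;
                /* -EBUSY is fine: somebody else faulted on the same PTE */
                if (err < 0 && err != -EBUSY)
                        return VM_FAULT_SIGBUS;

                return VM_FAULT_NOPAGE;
        }

With a helper of this shape in scope, the fault site needs no errno-to-VM_FAULT_* conversion of its own, which is the point of the vm_fault_t conversion.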

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

Attachment: .config.gz
Description: application/gzip