@@ -241,28 +241,28 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
 	}
 
 	nvbo->contig = !(tile_flags & NOUVEAU_GEM_TILE_NONCONTIG);
-	if (!nouveau_cli_uvmm(cli) || internal) {
-		/* for BO noVM allocs, don't assign kinds */
-		if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
-			nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
-			if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
-				kfree(nvbo);
-				return ERR_PTR(-EINVAL);
-			}
 
-			nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
-		} else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-			nvbo->kind = (tile_flags & 0x00007f00) >> 8;
-			nvbo->comp = (tile_flags & 0x00030000) >> 16;
-			if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
-				kfree(nvbo);
-				return ERR_PTR(-EINVAL);
-			}
-		} else {
-			nvbo->zeta = (tile_flags & 0x00000007);
+	if (cli->device.info.family >= NV_DEVICE_INFO_V0_FERMI) {
+		nvbo->kind = (tile_flags & 0x0000ff00) >> 8;
+		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+			kfree(nvbo);
+			return ERR_PTR(-EINVAL);
+		}
+
+		nvbo->comp = mmu->kind[nvbo->kind] != nvbo->kind;
+	} else if (cli->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+		nvbo->kind = (tile_flags & 0x00007f00) >> 8;
+		nvbo->comp = (tile_flags & 0x00030000) >> 16;
+		if (!nvif_mmu_kind_valid(mmu, nvbo->kind)) {
+			kfree(nvbo);
+			return ERR_PTR(-EINVAL);
 		}
-		nvbo->mode = tile_mode;
+	} else {
+		nvbo->zeta = (tile_flags & 0x00000007);
+	}
+	nvbo->mode = tile_mode;
 
+	if (!nouveau_cli_uvmm(cli) || internal) {
 		/* Determine the desirable target GPU page size for the buffer. */
 		for (i = 0; i < vmm->page_nr; i++) {
 			/* Because we cannot currently allow VMM maps to fail
@@ -304,12 +304,6 @@ nouveau_bo_alloc(struct nouveau_cli *cli, u64 *size, int *align, u32 domain,
 		}
 		nvbo->page = vmm->page[pi].shift;
 	} else {
-		/* reject other tile flags when in VM mode. */
-		if (tile_mode)
-			return ERR_PTR(-EINVAL);
-		if (tile_flags & ~NOUVEAU_GEM_TILE_NONCONTIG)
-			return ERR_PTR(-EINVAL);
-
 		/* Determine the desirable target GPU page size for the buffer. */
 		for (i = 0; i < vmm->page_nr; i++) {
 			/* Because we cannot currently allow VMM maps to fail
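
For reference, a minimal standalone sketch of the tile_flags decode that the first hunk now applies to every BO rather than only to non-uvmm/internal allocations. The bit masks are taken directly from the diff; the enum values, struct fields, and the decode_tile_flags() helper are illustrative stand-ins rather than driver code, and the Fermi-path derivation of comp from the MMU kind table is omitted because it needs device state.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the NVIF device family identifiers. */
enum gpu_family { FAMILY_PRE_TESLA = 0, FAMILY_TESLA = 1, FAMILY_FERMI = 2 };

struct tile_info {
	uint8_t kind;	/* page kind, Tesla and newer */
	uint8_t comp;	/* compression request, decoded only on Tesla here */
	uint8_t zeta;	/* legacy zeta format, pre-Tesla only */
};

/*
 * Decode a GEM tile_flags word the same way the hunk above does:
 * Fermi+ carries an 8-bit kind in bits 8..15, Tesla a 7-bit kind in
 * bits 8..14 plus a 2-bit comp field in bits 16..17, and pre-Tesla
 * hardware only a 3-bit zeta value in bits 0..2.
 */
static struct tile_info decode_tile_flags(enum gpu_family fam, uint32_t tile_flags)
{
	struct tile_info ti = {0};

	if (fam >= FAMILY_FERMI) {
		ti.kind = (tile_flags & 0x0000ff00) >> 8;
	} else if (fam >= FAMILY_TESLA) {
		ti.kind = (tile_flags & 0x00007f00) >> 8;
		ti.comp = (tile_flags & 0x00030000) >> 16;
	} else {
		ti.zeta = tile_flags & 0x00000007;
	}
	return ti;
}

int main(void)
{
	/* Example: a Fermi-style tile_flags word with kind 0xfe in bits 8..15. */
	struct tile_info ti = decode_tile_flags(FAMILY_FERMI, 0xfe00);

	printf("kind=0x%02x comp=%u zeta=%u\n", ti.kind, ti.comp, ti.zeta);
	return 0;
}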