@@ -196,7 +196,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
 		return -EINVAL;
 
 	vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
-	if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		system_mem_needed = size;
 		ttm_mem_needed = size;
 	}
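Throughout this patch the separate adev->gmc.is_app_apu test is dropped in favor of the generic AMD_IS_APU device flag alone, presumably because that flag is also set on the APP APU parts, making the extra test redundant. The hunk above charges an APU "VRAM" reservation against the system and TTM budgets, since APU framebuffer memory is carved out of system RAM. A minimal standalone sketch of that accounting branch follows; the struct, function name, and flag value are invented for illustration and are not the kernel's:

/* Illustrative sketch only, not the kernel code: on an APU a "VRAM"
 * reservation is charged to the system and TTM budgets, because APU
 * framebuffer memory is ordinary system RAM. */
#include <stdint.h>
#include <stdio.h>

#define AMD_IS_APU 0x00020000UL	/* assumed flag value for this sketch */

struct budget {
	uint64_t system_mem_needed;
	uint64_t ttm_mem_needed;
	uint64_t vram_needed;
};

static void reserve_budget(unsigned long dev_flags, uint64_t size,
			   struct budget *b)
{
	if (dev_flags & AMD_IS_APU) {
		/* APU: "VRAM" is system memory, charge both pools */
		b->system_mem_needed = size;
		b->ttm_mem_needed = size;
	} else {
		/* dGPU: charge dedicated VRAM */
		b->vram_needed = size;
	}
}

int main(void)
{
	struct budget b = {0};

	reserve_budget(AMD_IS_APU, 4096, &b);
	printf("system=%llu ttm=%llu vram=%llu\n",
	       (unsigned long long)b.system_mem_needed,
	       (unsigned long long)b.ttm_mem_needed,
	       (unsigned long long)b.vram_needed);
	return 0;
}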
@@ -233,7 +233,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
 	if (adev && xcp_id >= 0) {
 		adev->kfd.vram_used[xcp_id] += vram_needed;
 		adev->kfd.vram_used_aligned[xcp_id] +=
-			(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
+			(adev->flags & AMD_IS_APU) ?
 			vram_needed :
 			ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
 	}
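The aligned-usage counter differs by device type: dGPU usage is rounded up to VRAM_AVAILABLITY_ALIGN (the macro name is spelled that way in the source) so availability reporting stays stable against small-allocation fragmentation, while the APU path records the exact size. Below is a simplified stand-in for the power-of-two round-up that ALIGN() performs; the 2 MiB granularity in the asserts is only an example:

/* Simplified stand-in for the kernel's ALIGN() round-up (the real
 * macro lives in include/linux/align.h); 'a' must be a power of two. */
#include <assert.h>
#include <stdint.h>

static inline uint64_t align_up(uint64_t x, uint64_t a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	/* e.g. with a 2 MiB granularity, a 4 KiB allocation still
	 * consumes a full 2 MiB of reported availability on a dGPU;
	 * the APU path would record 4 KiB exactly. */
	assert(align_up(4096, 2 << 20) == 2 << 20);
	assert(align_up((2 << 20) + 1, 2 << 20) == 2ULL * (2 << 20));
	return 0;
}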
@@ -261,7 +261,7 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
 
 	if (adev) {
 		adev->kfd.vram_used[xcp_id] -= size;
-		if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+		if (adev->flags & AMD_IS_APU) {
 			adev->kfd.vram_used_aligned[xcp_id] -= size;
 			kfd_mem_limit.system_mem_used -= size;
 			kfd_mem_limit.ttm_mem_used -= size;
@@ -894,7 +894,7 @@ static int kfd_mem_attach(struct amdgpu_device *adev, struct kgd_mem *mem,
 	 * if peer device has large BAR. In contrast, access over xGMI is
 	 * allowed for both small and large BAR configurations of peer device
 	 */
-	if ((adev != bo_adev && !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU)) &&
+	if ((adev != bo_adev && !(adev->flags & AMD_IS_APU)) &&
 	    ((mem->domain == AMDGPU_GEM_DOMAIN_VRAM) ||
 	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL) ||
 	     (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP))) {
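This guard makes kfd_mem_attach validate peer access before mapping another device's VRAM, doorbell, or remapped MMIO pages (the comment above covers the large-BAR and xGMI cases). APUs are exempt because their "VRAM" is plain system memory. A hedged restatement of the predicate with simplified types, not the kernel's definitions:

/* Illustrative restatement of the check above; the struct and flag
 * value are stand-ins, not the kernel definitions. */
#include <stdbool.h>

#define AMD_IS_APU 0x00020000UL	/* assumed value, as in the sketch above */

struct dev { unsigned long flags; };

static bool needs_peer_access_check(const struct dev *adev,
				    const struct dev *bo_adev,
				    bool is_vram_doorbell_or_mmio)
{
	/* Validate peer access only when mapping another device's
	 * memory from a non-APU; APU "VRAM" is system memory and
	 * needs no large-BAR or xGMI path to be reachable. */
	return adev != bo_adev &&
	       !(adev->flags & AMD_IS_APU) &&
	       is_vram_doorbell_or_mmio;
}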
@@ -1682,7 +1682,7 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
 			- atomic64_read(&adev->vram_pin_size)
 			- reserved_for_pt;
 
-	if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+	if (adev->flags & AMD_IS_APU) {
 		system_mem_available = no_system_mem_limit ?
 			kfd_mem_limit.max_system_mem_limit :
 			kfd_mem_limit.max_system_mem_limit -
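On the APU path, available "VRAM" is additionally bounded by the system and TTM budgets, so the reported figure ends up as the minimum of the three. A sketch of that reduction; the helper below and its parameters are assumptions for illustration, not the kernel's interface:

/* Sketch of the APU available-memory reduction; names are invented. */
#include <stdint.h>

static uint64_t min_u64(uint64_t a, uint64_t b)
{
	return a < b ? a : b;
}

static uint64_t apu_available(uint64_t max_system, uint64_t system_used,
			      uint64_t max_ttm, uint64_t ttm_used,
			      uint64_t vram_available, int no_system_limit)
{
	uint64_t system_avail = no_system_limit ?
		max_system : max_system - system_used;
	uint64_t ttm_avail = max_ttm - ttm_used;

	/* the tightest of the three budgets wins */
	return min_u64(min_u64(system_avail, ttm_avail), vram_available);
}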
@@ -1730,7 +1730,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
 	if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
 		domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
 
-		if (adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) {
+		if (adev->flags & AMD_IS_APU) {
 			domain = AMDGPU_GEM_DOMAIN_GTT;
 			alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
 			alloc_flags = 0;
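When userspace asks for KFD_IOC_ALLOC_MEM_FLAGS_VRAM on an APU, the allocation is redirected to the GTT domain and the VRAM-specific alloc flags are cleared. A minimal sketch of that override; treat the domain constants as illustrative stand-ins for the amdgpu UAPI values:

/* Sketch of the APU domain override; constants are illustrative. */
#include <stdint.h>

#define GEM_DOMAIN_GTT	0x2	/* assumed UAPI values */
#define GEM_DOMAIN_VRAM	0x4
#define AMD_IS_APU	0x00020000UL

static uint32_t pick_alloc_domain(unsigned long dev_flags)
{
	uint32_t domain = GEM_DOMAIN_VRAM;

	if (dev_flags & AMD_IS_APU)
		domain = GEM_DOMAIN_GTT; /* no dedicated VRAM on an APU */

	return domain;
}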
@@ -1981,7 +1981,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
 	if (size) {
 		if (!is_imported &&
 		    (mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_VRAM ||
-		     ((adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) &&
+		     ((adev->flags & AMD_IS_APU) &&
 		      mem->bo->preferred_domains == AMDGPU_GEM_DOMAIN_GTT)))
 			*size = bo_size;
 		else
@@ -2404,7 +2404,7 @@ static int import_obj_create(struct amdgpu_device *adev,
 	(*mem)->bo = bo;
 	(*mem)->va = va;
 	(*mem)->domain = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) &&
-			 !(adev->gmc.is_app_apu || adev->flags & AMD_IS_APU) ?
+			 !(adev->flags & AMD_IS_APU) ?
 			 AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT;
 
 	(*mem)->mapped_to_gpu_memory = 0;
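The import path makes the same call in ternary form: a buffer whose preferred domains include VRAM is treated as VRAM-resident on a dGPU but as GTT on an APU. A compact sketch, with the same illustrative constants as above:

/* Sketch of the import-path domain choice; constants as above. */
#include <stdint.h>

#define GEM_DOMAIN_GTT	0x2
#define GEM_DOMAIN_VRAM	0x4
#define AMD_IS_APU	0x00020000UL

static uint32_t import_domain(uint32_t preferred_domains,
			      unsigned long dev_flags)
{
	return (preferred_domains & GEM_DOMAIN_VRAM) &&
	       !(dev_flags & AMD_IS_APU) ?
	       GEM_DOMAIN_VRAM : GEM_DOMAIN_GTT;
}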