Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 887
1 file changed, 703 insertions, 184 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 39c01b942ee4..9e16e975f31a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -41,16 +41,26 @@ #include "atom.h" #include "amdgpu_atombios.h" #include "amd_pcie.h" +#ifdef CONFIG_DRM_AMDGPU_SI +#include "si.h" +#endif #ifdef CONFIG_DRM_AMDGPU_CIK #include "cik.h" #endif #include "vi.h" #include "bif/bif_4_1_d.h" +#include <linux/pci.h> +#include <linux/firmware.h> static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev); static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev); static const char *amdgpu_asic_name[] = { + "TAHITI", + "PITCAIRN", + "VERDE", + "OLAND", + "HAINAN", "BONAIRE", "KAVERI", "KABINI", @@ -101,7 +111,7 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, bool always_indirect) { trace_amdgpu_mm_wreg(adev->pdev->device, reg, v); - + if ((reg * 4) < adev->rmmio_size && !always_indirect) writel(v, ((void __iomem *)adev->rmmio) + (reg * 4)); else { @@ -254,7 +264,8 @@ static int amdgpu_vram_scratch_init(struct amdgpu_device *adev) if (adev->vram_scratch.robj == NULL) { r = amdgpu_bo_create(adev, AMDGPU_GPU_PAGE_SIZE, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_VRAM, - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | + AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS, NULL, NULL, &adev->vram_scratch.robj); if (r) { return r; @@ -432,13 +443,9 @@ void amdgpu_doorbell_get_kfd_info(struct amdgpu_device *adev, static void amdgpu_wb_fini(struct amdgpu_device *adev) { if (adev->wb.wb_obj) { - if (!amdgpu_bo_reserve(adev->wb.wb_obj, false)) { - amdgpu_bo_kunmap(adev->wb.wb_obj); - amdgpu_bo_unpin(adev->wb.wb_obj); - amdgpu_bo_unreserve(adev->wb.wb_obj); - } - amdgpu_bo_unref(&adev->wb.wb_obj); - adev->wb.wb = NULL; + amdgpu_bo_free_kernel(&adev->wb.wb_obj, + &adev->wb.gpu_addr, + (void **)&adev->wb.wb); adev->wb.wb_obj = NULL; } } @@ -457,33 +464,14 @@ static int amdgpu_wb_init(struct amdgpu_device *adev) int r; if (adev->wb.wb_obj == NULL) { - r = amdgpu_bo_create(adev, AMDGPU_MAX_WB * 4, PAGE_SIZE, true, - AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL, - &adev->wb.wb_obj); + r = amdgpu_bo_create_kernel(adev, AMDGPU_MAX_WB * 4, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, + &adev->wb.wb_obj, &adev->wb.gpu_addr, + (void **)&adev->wb.wb); if (r) { dev_warn(adev->dev, "(%d) create WB bo failed\n", r); return r; } - r = amdgpu_bo_reserve(adev->wb.wb_obj, false); - if (unlikely(r != 0)) { - amdgpu_wb_fini(adev); - return r; - } - r = amdgpu_bo_pin(adev->wb.wb_obj, AMDGPU_GEM_DOMAIN_GTT, - &adev->wb.gpu_addr); - if (r) { - amdgpu_bo_unreserve(adev->wb.wb_obj); - dev_warn(adev->dev, "(%d) pin WB bo failed\n", r); - amdgpu_wb_fini(adev); - return r; - } - r = amdgpu_bo_kmap(adev->wb.wb_obj, (void **)&adev->wb.wb); - amdgpu_bo_unreserve(adev->wb.wb_obj); - if (r) { - dev_warn(adev->dev, "(%d) map WB bo failed\n", r); - amdgpu_wb_fini(adev); - return r; - } adev->wb.num_wb = AMDGPU_MAX_WB; memset(&adev->wb.used, 0, sizeof(adev->wb.used)); @@ -642,6 +630,46 @@ bool amdgpu_card_posted(struct amdgpu_device *adev) } +static bool amdgpu_vpost_needed(struct amdgpu_device *adev) +{ + if (amdgpu_sriov_vf(adev)) + return false; + + if (amdgpu_passthrough(adev)) { + /* for FIJI: In whole GPU pass-through virtualization case + * old smc fw won't clear some registers (e.g. MEM_SIZE, BIOS_SCRATCH) + * so amdgpu_card_posted return false and driver will incorrectly skip vPost. 
+ * but if we force vPost do in pass-through case, the driver reload will hang. + * whether doing vPost depends on amdgpu_card_posted if smc version is above + * 00160e00 for FIJI. + */ + if (adev->asic_type == CHIP_FIJI) { + int err; + uint32_t fw_ver; + err = request_firmware(&adev->pm.fw, "amdgpu/fiji_smc.bin", adev->dev); + /* force vPost if error occured */ + if (err) + return true; + + fw_ver = *((uint32_t *)adev->pm.fw->data + 69); + if (fw_ver >= 0x00160e00) + return !amdgpu_card_posted(adev); + } + } else { + /* in bare-metal case, amdgpu_card_posted return false + * after system reboot/boot, and return true if driver + * reloaded. + * we shouldn't do vPost after driver reload otherwise GPU + * could hang. + */ + if (amdgpu_card_posted(adev)) + return false; + } + + /* we assume vPost is neede for all other cases */ + return true; +} + /** * amdgpu_dummy_page_init - init dummy page used by the driver * @@ -1001,6 +1029,13 @@ static void amdgpu_check_arguments(struct amdgpu_device *adev) amdgpu_vm_block_size); amdgpu_vm_block_size = 9; } + + if ((amdgpu_vram_page_split != -1 && amdgpu_vram_page_split < 16) || + !amdgpu_check_pot_argument(amdgpu_vram_page_split)) { + dev_warn(adev->dev, "invalid VRAM page split (%d)\n", + amdgpu_vram_page_split); + amdgpu_vram_page_split = 1024; + } } /** @@ -1026,7 +1061,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero /* don't suspend or resume card normally */ dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - amdgpu_resume_kms(dev, true, true); + amdgpu_device_resume(dev, true, true); dev->pdev->d3_delay = d3_delay; @@ -1036,7 +1071,7 @@ static void amdgpu_switcheroo_set_state(struct pci_dev *pdev, enum vga_switchero printk(KERN_INFO "amdgpu: switched off\n"); drm_kms_helper_poll_disable(dev); dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - amdgpu_suspend_kms(dev, true, true); + amdgpu_device_suspend(dev, true, true); dev->switch_power_state = DRM_SWITCH_POWER_OFF; } } @@ -1075,11 +1110,11 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev, int i, r = 0; for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_block_status[i].valid) + if (!adev->ip_blocks[i].status.valid) continue; - if (adev->ip_blocks[i].type == block_type) { - r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, - state); + if (adev->ip_blocks[i].version->type == block_type) { + r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, + state); if (r) return r; break; @@ -1095,11 +1130,11 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev, int i, r = 0; for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_block_status[i].valid) + if (!adev->ip_blocks[i].status.valid) continue; - if (adev->ip_blocks[i].type == block_type) { - r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev, - state); + if (adev->ip_blocks[i].version->type == block_type) { + r = adev->ip_blocks[i].version->funcs->set_powergating_state((void *)adev, + state); if (r) return r; break; @@ -1114,10 +1149,10 @@ int amdgpu_wait_for_idle(struct amdgpu_device *adev, int i, r; for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_block_status[i].valid) + if (!adev->ip_blocks[i].status.valid) continue; - if (adev->ip_blocks[i].type == block_type) { - r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev); + if (adev->ip_blocks[i].version->type == block_type) { + r = adev->ip_blocks[i].version->funcs->wait_for_idle((void *)adev); if (r) return r; break; @@ -1133,23 +1168,22 @@ bool 
amdgpu_is_idle(struct amdgpu_device *adev, int i; for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_block_status[i].valid) + if (!adev->ip_blocks[i].status.valid) continue; - if (adev->ip_blocks[i].type == block_type) - return adev->ip_blocks[i].funcs->is_idle((void *)adev); + if (adev->ip_blocks[i].version->type == block_type) + return adev->ip_blocks[i].version->funcs->is_idle((void *)adev); } return true; } -const struct amdgpu_ip_block_version * amdgpu_get_ip_block( - struct amdgpu_device *adev, - enum amd_ip_block_type type) +struct amdgpu_ip_block * amdgpu_get_ip_block(struct amdgpu_device *adev, + enum amd_ip_block_type type) { int i; for (i = 0; i < adev->num_ip_blocks; i++) - if (adev->ip_blocks[i].type == type) + if (adev->ip_blocks[i].version->type == type) return &adev->ip_blocks[i]; return NULL; @@ -1170,21 +1204,86 @@ int amdgpu_ip_block_version_cmp(struct amdgpu_device *adev, enum amd_ip_block_type type, u32 major, u32 minor) { - const struct amdgpu_ip_block_version *ip_block; - ip_block = amdgpu_get_ip_block(adev, type); + struct amdgpu_ip_block *ip_block = amdgpu_get_ip_block(adev, type); - if (ip_block && ((ip_block->major > major) || - ((ip_block->major == major) && - (ip_block->minor >= minor)))) + if (ip_block && ((ip_block->version->major > major) || + ((ip_block->version->major == major) && + (ip_block->version->minor >= minor)))) return 0; return 1; } +/** + * amdgpu_ip_block_add + * + * @adev: amdgpu_device pointer + * @ip_block_version: pointer to the IP to add + * + * Adds the IP block driver information to the collection of IPs + * on the asic. + */ +int amdgpu_ip_block_add(struct amdgpu_device *adev, + const struct amdgpu_ip_block_version *ip_block_version) +{ + if (!ip_block_version) + return -EINVAL; + + adev->ip_blocks[adev->num_ip_blocks++].version = ip_block_version; + + return 0; +} + +static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev) +{ + adev->enable_virtual_display = false; + + if (amdgpu_virtual_display) { + struct drm_device *ddev = adev->ddev; + const char *pci_address_name = pci_name(ddev->pdev); + char *pciaddstr, *pciaddstr_tmp, *pciaddname_tmp, *pciaddname; + + pciaddstr = kstrdup(amdgpu_virtual_display, GFP_KERNEL); + pciaddstr_tmp = pciaddstr; + while ((pciaddname_tmp = strsep(&pciaddstr_tmp, ";"))) { + pciaddname = strsep(&pciaddname_tmp, ","); + if (!strcmp(pci_address_name, pciaddname)) { + long num_crtc; + int res = -1; + + adev->enable_virtual_display = true; + + if (pciaddname_tmp) + res = kstrtol(pciaddname_tmp, 10, + &num_crtc); + + if (!res) { + if (num_crtc < 1) + num_crtc = 1; + if (num_crtc > 6) + num_crtc = 6; + adev->mode_info.num_crtc = num_crtc; + } else { + adev->mode_info.num_crtc = 1; + } + break; + } + } + + DRM_INFO("virtual display string:%s, %s:virtual_display:%d, num_crtc:%d\n", + amdgpu_virtual_display, pci_address_name, + adev->enable_virtual_display, adev->mode_info.num_crtc); + + kfree(pciaddstr); + } +} + static int amdgpu_early_init(struct amdgpu_device *adev) { int i, r; + amdgpu_device_enable_virtual_display(adev); + switch (adev->asic_type) { case CHIP_TOPAZ: case CHIP_TONGA: @@ -1202,6 +1301,18 @@ static int amdgpu_early_init(struct amdgpu_device *adev) if (r) return r; break; +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_VERDE: + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_OLAND: + case CHIP_HAINAN: + adev->family = AMDGPU_FAMILY_SI; + r = si_set_ip_blocks(adev); + if (r) + return r; + break; +#endif #ifdef CONFIG_DRM_AMDGPU_CIK case CHIP_BONAIRE: case CHIP_HAWAII: @@ 
-1223,33 +1334,24 @@ static int amdgpu_early_init(struct amdgpu_device *adev) return -EINVAL; } - adev->ip_block_status = kcalloc(adev->num_ip_blocks, - sizeof(struct amdgpu_ip_block_status), GFP_KERNEL); - if (adev->ip_block_status == NULL) - return -ENOMEM; - - if (adev->ip_blocks == NULL) { - DRM_ERROR("No IP blocks found!\n"); - return r; - } - for (i = 0; i < adev->num_ip_blocks; i++) { if ((amdgpu_ip_block_mask & (1 << i)) == 0) { DRM_ERROR("disabled ip block: %d\n", i); - adev->ip_block_status[i].valid = false; + adev->ip_blocks[i].status.valid = false; } else { - if (adev->ip_blocks[i].funcs->early_init) { - r = adev->ip_blocks[i].funcs->early_init((void *)adev); + if (adev->ip_blocks[i].version->funcs->early_init) { + r = adev->ip_blocks[i].version->funcs->early_init((void *)adev); if (r == -ENOENT) { - adev->ip_block_status[i].valid = false; + adev->ip_blocks[i].status.valid = false; } else if (r) { - DRM_ERROR("early_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); + DRM_ERROR("early_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); return r; } else { - adev->ip_block_status[i].valid = true; + adev->ip_blocks[i].status.valid = true; } } else { - adev->ip_block_status[i].valid = true; + adev->ip_blocks[i].status.valid = true; } } } @@ -1265,22 +1367,23 @@ static int amdgpu_init(struct amdgpu_device *adev) int i, r; for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_block_status[i].valid) + if (!adev->ip_blocks[i].status.valid) continue; - r = adev->ip_blocks[i].funcs->sw_init((void *)adev); + r = adev->ip_blocks[i].version->funcs->sw_init((void *)adev); if (r) { - DRM_ERROR("sw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); + DRM_ERROR("sw_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); return r; } - adev->ip_block_status[i].sw = true; + adev->ip_blocks[i].status.sw = true; /* need to do gmc hw init early so we can allocate gpu mem */ - if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { r = amdgpu_vram_scratch_init(adev); if (r) { DRM_ERROR("amdgpu_vram_scratch_init failed %d\n", r); return r; } - r = adev->ip_blocks[i].funcs->hw_init((void *)adev); + r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); if (r) { DRM_ERROR("hw_init %d failed %d\n", i, r); return r; @@ -1290,22 +1393,23 @@ static int amdgpu_init(struct amdgpu_device *adev) DRM_ERROR("amdgpu_wb_init failed %d\n", r); return r; } - adev->ip_block_status[i].hw = true; + adev->ip_blocks[i].status.hw = true; } } for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_block_status[i].sw) + if (!adev->ip_blocks[i].status.sw) continue; /* gmc hw init is done early */ - if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) continue; - r = adev->ip_blocks[i].funcs->hw_init((void *)adev); + r = adev->ip_blocks[i].version->funcs->hw_init((void *)adev); if (r) { - DRM_ERROR("hw_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); + DRM_ERROR("hw_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); return r; } - adev->ip_block_status[i].hw = true; + adev->ip_blocks[i].status.hw = true; } return 0; @@ -1316,19 +1420,26 @@ static int amdgpu_late_init(struct amdgpu_device *adev) int i = 0, r; for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_block_status[i].valid) + if (!adev->ip_blocks[i].status.valid) 
continue; - /* enable clockgating to save power */ - r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, - AMD_CG_STATE_GATE); - if (r) { - DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); - return r; + if (adev->ip_blocks[i].version->funcs->late_init) { + r = adev->ip_blocks[i].version->funcs->late_init((void *)adev); + if (r) { + DRM_ERROR("late_init of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + return r; + } + adev->ip_blocks[i].status.late_initialized = true; } - if (adev->ip_blocks[i].funcs->late_init) { - r = adev->ip_blocks[i].funcs->late_init((void *)adev); + /* skip CG for VCE/UVD, it's handled specially */ + if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD && + adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE) { + /* enable clockgating to save power */ + r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, + AMD_CG_STATE_GATE); if (r) { - DRM_ERROR("late_init of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); + DRM_ERROR("set_clockgating_state(gate) of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); return r; } } @@ -1341,43 +1452,73 @@ static int amdgpu_fini(struct amdgpu_device *adev) { int i, r; + /* need to disable SMC first */ + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_blocks[i].status.hw) + continue; + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { + /* ungate blocks before hw fini so that we can shutdown the blocks safely */ + r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, + AMD_CG_STATE_UNGATE); + if (r) { + DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + return r; + } + r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); + /* XXX handle errors */ + if (r) { + DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); + } + adev->ip_blocks[i].status.hw = false; + break; + } + } + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { - if (!adev->ip_block_status[i].hw) + if (!adev->ip_blocks[i].status.hw) continue; - if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { + if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) { amdgpu_wb_fini(adev); amdgpu_vram_scratch_fini(adev); } /* ungate blocks before hw fini so that we can shutdown the blocks safely */ - r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, - AMD_CG_STATE_UNGATE); + r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, + AMD_CG_STATE_UNGATE); if (r) { - DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); + DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); return r; } - r = adev->ip_blocks[i].funcs->hw_fini((void *)adev); + r = adev->ip_blocks[i].version->funcs->hw_fini((void *)adev); /* XXX handle errors */ if (r) { - DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); + DRM_DEBUG("hw_fini of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); } - adev->ip_block_status[i].hw = false; + adev->ip_blocks[i].status.hw = false; } for (i = adev->num_ip_blocks - 1; i >= 0; i--) { - if (!adev->ip_block_status[i].sw) + if (!adev->ip_blocks[i].status.sw) continue; - r = adev->ip_blocks[i].funcs->sw_fini((void *)adev); + r = 
adev->ip_blocks[i].version->funcs->sw_fini((void *)adev); /* XXX handle errors */ if (r) { - DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); + DRM_DEBUG("sw_fini of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); } - adev->ip_block_status[i].sw = false; - adev->ip_block_status[i].valid = false; + adev->ip_blocks[i].status.sw = false; + adev->ip_blocks[i].status.valid = false; } for (i = adev->num_ip_blocks - 1; i >= 0; i--) { - if (adev->ip_blocks[i].funcs->late_fini) - adev->ip_blocks[i].funcs->late_fini((void *)adev); + if (!adev->ip_blocks[i].status.late_initialized) + continue; + if (adev->ip_blocks[i].version->funcs->late_fini) + adev->ip_blocks[i].version->funcs->late_fini((void *)adev); + adev->ip_blocks[i].status.late_initialized = false; } return 0; @@ -1395,21 +1536,23 @@ static int amdgpu_suspend(struct amdgpu_device *adev) } for (i = adev->num_ip_blocks - 1; i >= 0; i--) { - if (!adev->ip_block_status[i].valid) + if (!adev->ip_blocks[i].status.valid) continue; /* ungate blocks so that suspend can properly shut them down */ if (i != AMD_IP_BLOCK_TYPE_SMC) { - r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, - AMD_CG_STATE_UNGATE); + r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev, + AMD_CG_STATE_UNGATE); if (r) { - DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); + DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); } } /* XXX handle errors */ - r = adev->ip_blocks[i].funcs->suspend(adev); + r = adev->ip_blocks[i].version->funcs->suspend(adev); /* XXX handle errors */ if (r) { - DRM_ERROR("suspend of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); + DRM_ERROR("suspend of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); } } @@ -1421,11 +1564,12 @@ static int amdgpu_resume(struct amdgpu_device *adev) int i, r; for (i = 0; i < adev->num_ip_blocks; i++) { - if (!adev->ip_block_status[i].valid) + if (!adev->ip_blocks[i].status.valid) continue; - r = adev->ip_blocks[i].funcs->resume(adev); + r = adev->ip_blocks[i].version->funcs->resume(adev); if (r) { - DRM_ERROR("resume of IP block <%s> failed %d\n", adev->ip_blocks[i].funcs->name, r); + DRM_ERROR("resume of IP block <%s> failed %d\n", + adev->ip_blocks[i].version->funcs->name, r); return r; } } @@ -1433,13 +1577,10 @@ static int amdgpu_resume(struct amdgpu_device *adev) return 0; } -static bool amdgpu_device_is_virtual(void) +static void amdgpu_device_detect_sriov_bios(struct amdgpu_device *adev) { -#ifdef CONFIG_X86 - return boot_cpu_has(X86_FEATURE_HYPERVISOR); -#else - return false; -#endif + if (amdgpu_atombios_has_gpu_virtualization_table(adev)) + adev->virtualization.virtual_caps |= AMDGPU_SRIOV_CAPS_SRIOV_VBIOS; } /** @@ -1461,6 +1602,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, { int r, i; bool runtime = false; + u32 max_MBps; adev->shutdown = false; adev->dev = &pdev->dev; @@ -1478,12 +1620,14 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->vm_manager.vm_pte_funcs = NULL; adev->vm_manager.vm_pte_num_rings = 0; adev->gart.gart_funcs = NULL; - adev->fence_context = fence_context_alloc(AMDGPU_MAX_RINGS); + adev->fence_context = dma_fence_context_alloc(AMDGPU_MAX_RINGS); adev->smc_rreg = &amdgpu_invalid_rreg; adev->smc_wreg = &amdgpu_invalid_wreg; adev->pcie_rreg = &amdgpu_invalid_rreg; adev->pcie_wreg = &amdgpu_invalid_wreg; + 
adev->pciep_rreg = &amdgpu_invalid_rreg; + adev->pciep_wreg = &amdgpu_invalid_wreg; adev->uvd_ctx_rreg = &amdgpu_invalid_rreg; adev->uvd_ctx_wreg = &amdgpu_invalid_wreg; adev->didt_rreg = &amdgpu_invalid_rreg; @@ -1520,9 +1664,22 @@ int amdgpu_device_init(struct amdgpu_device *adev, spin_lock_init(&adev->didt_idx_lock); spin_lock_init(&adev->gc_cac_idx_lock); spin_lock_init(&adev->audio_endpt_idx_lock); + spin_lock_init(&adev->mm_stats.lock); + + INIT_LIST_HEAD(&adev->shadow_list); + mutex_init(&adev->shadow_list_lock); + + INIT_LIST_HEAD(&adev->gtt_list); + spin_lock_init(&adev->gtt_list_lock); + + if (adev->asic_type >= CHIP_BONAIRE) { + adev->rmmio_base = pci_resource_start(adev->pdev, 5); + adev->rmmio_size = pci_resource_len(adev->pdev, 5); + } else { + adev->rmmio_base = pci_resource_start(adev->pdev, 2); + adev->rmmio_size = pci_resource_len(adev->pdev, 2); + } - adev->rmmio_base = pci_resource_start(adev->pdev, 5); - adev->rmmio_size = pci_resource_len(adev->pdev, 5); adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size); if (adev->rmmio == NULL) { return -ENOMEM; @@ -1530,8 +1687,9 @@ int amdgpu_device_init(struct amdgpu_device *adev, DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base); DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size); - /* doorbell bar mapping */ - amdgpu_doorbell_init(adev); + if (adev->asic_type >= CHIP_BONAIRE) + /* doorbell bar mapping */ + amdgpu_doorbell_init(adev); /* io port mapping */ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { @@ -1579,25 +1737,24 @@ int amdgpu_device_init(struct amdgpu_device *adev, goto failed; } - /* See if the asic supports SR-IOV */ - adev->virtualization.supports_sr_iov = - amdgpu_atombios_has_gpu_virtualization_table(adev); - - /* Check if we are executing in a virtualized environment */ - adev->virtualization.is_virtual = amdgpu_device_is_virtual(); - adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev); + /* detect if we are with an SRIOV vbios */ + amdgpu_device_detect_sriov_bios(adev); /* Post card if necessary */ - if (!amdgpu_card_posted(adev) || - (adev->virtualization.is_virtual && - !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) { + if (amdgpu_vpost_needed(adev)) { if (!adev->bios) { - dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n"); + dev_err(adev->dev, "no vBIOS found\n"); r = -EINVAL; goto failed; } - DRM_INFO("GPU not posted. posting now...\n"); - amdgpu_atom_asic_init(adev->mode_info.atom_context); + DRM_INFO("GPU posting now...\n"); + r = amdgpu_atom_asic_init(adev->mode_info.atom_context); + if (r) { + dev_err(adev->dev, "gpu post error!\n"); + goto failed; + } + } else { + DRM_INFO("GPU post is not needed\n"); } /* Initialize clocks */ @@ -1628,6 +1785,14 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->accel_working = true; + /* Initialize the buffer migration limit. */ + if (amdgpu_moverate >= 0) + max_MBps = amdgpu_moverate; + else + max_MBps = 8; /* Allow 8 MB/s. */ + /* Get a log2 for easy divisions. 
*/ + adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps)); + amdgpu_fbdev_init(adev); r = amdgpu_ib_pool_init(adev); @@ -1715,8 +1880,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev) amdgpu_fence_driver_fini(adev); amdgpu_fbdev_fini(adev); r = amdgpu_fini(adev); - kfree(adev->ip_block_status); - adev->ip_block_status = NULL; adev->accel_working = false; /* free i2c buses */ amdgpu_i2c_fini(adev); @@ -1732,7 +1895,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev) adev->rio_mem = NULL; iounmap(adev->rmmio); adev->rmmio = NULL; - amdgpu_doorbell_fini(adev); + if (adev->asic_type >= CHIP_BONAIRE) + amdgpu_doorbell_fini(adev); amdgpu_debugfs_regs_cleanup(adev); amdgpu_debugfs_remove_files(adev); } @@ -1742,7 +1906,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev) * Suspend & resume. */ /** - * amdgpu_suspend_kms - initiate device suspend + * amdgpu_device_suspend - initiate device suspend * * @pdev: drm dev pointer * @state: suspend state @@ -1751,7 +1915,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev) * Returns 0 for success or an error on failure. * Called at driver suspend. */ -int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) +int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) { struct amdgpu_device *adev; struct drm_crtc *crtc; @@ -1811,14 +1975,22 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) r = amdgpu_suspend(adev); - /* evict remaining vram memory */ + /* evict remaining vram memory + * This second call to evict vram is to evict the gart page table + * using the CPU. + */ amdgpu_bo_evict_vram(adev); + amdgpu_atombios_scratch_regs_save(adev); pci_save_state(dev->pdev); if (suspend) { /* Shut down the device */ pci_disable_device(dev->pdev); pci_set_power_state(dev->pdev, PCI_D3hot); + } else { + r = amdgpu_asic_reset(adev); + if (r) + DRM_ERROR("amdgpu asic reset failed\n"); } if (fbcon) { @@ -1830,7 +2002,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) } /** - * amdgpu_resume_kms - initiate device resume + * amdgpu_device_resume - initiate device resume * * @pdev: drm dev pointer * @@ -1838,7 +2010,7 @@ int amdgpu_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon) * Returns 0 for success or an error on failure. * Called at driver resume. 
*/ -int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) +int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) { struct drm_connector *connector; struct amdgpu_device *adev = dev->dev_private; @@ -1848,22 +2020,27 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) if (dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; - if (fbcon) { + if (fbcon) console_lock(); - } + if (resume) { pci_set_power_state(dev->pdev, PCI_D0); pci_restore_state(dev->pdev); - if (pci_enable_device(dev->pdev)) { + r = pci_enable_device(dev->pdev); + if (r) { if (fbcon) console_unlock(); - return -1; + return r; } } + amdgpu_atombios_scratch_regs_restore(adev); /* post card */ - if (!amdgpu_card_posted(adev)) - amdgpu_atom_asic_init(adev->mode_info.atom_context); + if (!amdgpu_card_posted(adev) || !resume) { + r = amdgpu_atom_asic_init(adev->mode_info.atom_context); + if (r) + DRM_ERROR("amdgpu asic init failed\n"); + } r = amdgpu_resume(adev); if (r) @@ -1937,6 +2114,135 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon) return 0; } +static bool amdgpu_check_soft_reset(struct amdgpu_device *adev) +{ + int i; + bool asic_hang = false; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_blocks[i].status.valid) + continue; + if (adev->ip_blocks[i].version->funcs->check_soft_reset) + adev->ip_blocks[i].status.hang = + adev->ip_blocks[i].version->funcs->check_soft_reset(adev); + if (adev->ip_blocks[i].status.hang) { + DRM_INFO("IP block:%s is hung!\n", adev->ip_blocks[i].version->funcs->name); + asic_hang = true; + } + } + return asic_hang; +} + +static int amdgpu_pre_soft_reset(struct amdgpu_device *adev) +{ + int i, r = 0; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_blocks[i].status.valid) + continue; + if (adev->ip_blocks[i].status.hang && + adev->ip_blocks[i].version->funcs->pre_soft_reset) { + r = adev->ip_blocks[i].version->funcs->pre_soft_reset(adev); + if (r) + return r; + } + } + + return 0; +} + +static bool amdgpu_need_full_reset(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_blocks[i].status.valid) + continue; + if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) || + (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) || + (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) || + (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) { + if (adev->ip_blocks[i].status.hang) { + DRM_INFO("Some block need full reset!\n"); + return true; + } + } + } + return false; +} + +static int amdgpu_soft_reset(struct amdgpu_device *adev) +{ + int i, r = 0; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_blocks[i].status.valid) + continue; + if (adev->ip_blocks[i].status.hang && + adev->ip_blocks[i].version->funcs->soft_reset) { + r = adev->ip_blocks[i].version->funcs->soft_reset(adev); + if (r) + return r; + } + } + + return 0; +} + +static int amdgpu_post_soft_reset(struct amdgpu_device *adev) +{ + int i, r = 0; + + for (i = 0; i < adev->num_ip_blocks; i++) { + if (!adev->ip_blocks[i].status.valid) + continue; + if (adev->ip_blocks[i].status.hang && + adev->ip_blocks[i].version->funcs->post_soft_reset) + r = adev->ip_blocks[i].version->funcs->post_soft_reset(adev); + if (r) + return r; + } + + return 0; +} + +bool amdgpu_need_backup(struct amdgpu_device *adev) +{ + if (adev->flags & AMD_IS_APU) + return false; + + return amdgpu_lockup_timeout > 0 ? 
true : false; +} + +static int amdgpu_recover_vram_from_shadow(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_bo *bo, + struct dma_fence **fence) +{ + uint32_t domain; + int r; + + if (!bo->shadow) + return 0; + + r = amdgpu_bo_reserve(bo, false); + if (r) + return r; + domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); + /* if bo has been evicted, then no need to recover */ + if (domain == AMDGPU_GEM_DOMAIN_VRAM) { + r = amdgpu_bo_restore_from_shadow(adev, ring, bo, + NULL, fence, true); + if (r) { + DRM_ERROR("recover page table failed!\n"); + goto err; + } + } +err: + amdgpu_bo_unreserve(bo); + return r; +} + /** * amdgpu_gpu_reset - reset the asic * @@ -1949,6 +2255,12 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) { int i, r; int resched; + bool need_full_reset; + + if (!amdgpu_check_soft_reset(adev)) { + DRM_INFO("No hardware hang detected. Did some blocks stall?\n"); + return 0; + } atomic_inc(&adev->gpu_reset_counter); @@ -1967,40 +2279,90 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev) /* after all hw jobs are reset, hw fence is meaningless, so force_completion */ amdgpu_fence_driver_force_completion(adev); - /* save scratch */ - amdgpu_atombios_scratch_regs_save(adev); - r = amdgpu_suspend(adev); + need_full_reset = amdgpu_need_full_reset(adev); -retry: - /* Disable fb access */ - if (adev->mode_info.num_crtc) { - struct amdgpu_mode_mc_save save; - amdgpu_display_stop_mc_access(adev, &save); - amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC); |
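For context on the reset-path rework at the end of this hunk: amdgpu_gpu_reset() now calls amdgpu_check_soft_reset() first, and each IP block may report its own hang status; a full ASIC reset is only requested when one of the heavyweight blocks (GMC, SMC, ACP, DCE) is among the hung ones, otherwise the per-block soft-reset hooks are used. The following is a minimal userspace sketch of that decision logic only — the type and helper names are illustrative, not the kernel implementation:

/*
 * Sketch (assumed names): per-block hang check plus the
 * "does any heavyweight block require a full reset?" decision.
 */
#include <stdbool.h>
#include <stdio.h>

enum block_type { GMC, SMC, ACP, DCE, GFX, SDMA };

struct ip_block {
	enum block_type type;
	bool (*check_soft_reset)(void);   /* optional per-block hook */
	bool hang;                        /* mirrors status.hang in the patch */
};

static bool check_soft_reset(struct ip_block *blocks, int n)
{
	bool asic_hang = false;
	int i;

	for (i = 0; i < n; i++) {
		if (blocks[i].check_soft_reset)
			blocks[i].hang = blocks[i].check_soft_reset();
		if (blocks[i].hang) {
			printf("block %d is hung\n", i);
			asic_hang = true;
		}
	}
	return asic_hang;
}

static bool is_heavy(enum block_type t)
{
	return t == GMC || t == SMC || t == ACP || t == DCE;
}

static bool need_full_reset(struct ip_block *blocks, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (is_heavy(blocks[i].type) && blocks[i].hang)
			return true;
	return false;
}

static bool gfx_hung(void) { return true; }   /* pretend GFX reported a hang */

int main(void)
{
	struct ip_block blocks[] = {
		{ GMC, NULL,     false },
		{ GFX, gfx_hung, false },
	};

	if (!check_soft_reset(blocks, 2))
		return 0;                 /* nothing hung: skip the reset entirely */
	printf("full reset needed: %d\n", need_full_reset(blocks, 2));
	return 0;
}

In this example only GFX reports a hang, so the sketch prints "full reset needed: 0" and the real driver would proceed with the lighter soft-reset path instead of a whole-ASIC reset.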