/*
* Copyright 2015 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: AMD
*
*/
/* The caprices of the preprocessor require that this be declared right here */
#define CREATE_TRACE_POINTS
#include "dm_services_types.h"
#include "dc.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "vid.h"
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_ucode.h"
#include "atom.h"
#include "amdgpu_dm.h"
#include "amdgpu_pm.h"
#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
#include "dm_helpers.h"
#include "amdgpu_dm_mst_types.h"
#if defined(CONFIG_DEBUG_FS)
#include "amdgpu_dm_debugfs.h"
#endif
#include "ivsrcid/ivsrcid_vislands30.h"
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pm_runtime.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/component.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_edid.h>
#include <drm/drm_vblank.h>
#include <drm/drm_audio_component.h>
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
#include "dcn/dcn_1_0_offset.h"
#include "dcn/dcn_1_0_sh_mask.h"
#include "soc15_hw_ip.h"
#include "vega10_ip_offset.h"
#include "soc15_common.h"
#endif
#include "modules/inc/mod_freesync.h"
#include "modules/power/power_helpers.h"
#include "modules/inc/mod_info_packet.h"
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
/**
* DOC: overview
*
* The AMDgpu display manager, **amdgpu_dm** (or even simpler,
* **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
* requests into DC requests, and DC responses into DRM responses.
*
* The root control structure is &struct amdgpu_display_manager.
*/
/* basic init/fini API */
static int amdgpu_dm_init(struct amdgpu_device *adev);
static void amdgpu_dm_fini(struct amdgpu_device *adev);
/*
* initializes drm_device display related structures, based on the information
* provided by DAL. The drm structures are: drm_crtc, drm_connector,
* drm_encoder, drm_mode_config
*
* Returns 0 on success
*/
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
unsigned long possible_crtcs,
const struct dc_plane_cap *plane_cap);
static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
uint32_t link_index);
static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *amdgpu_dm_connector,
uint32_t link_index,
struct amdgpu_encoder *amdgpu_encoder);
static int amdgpu_dm_encoder_init(struct drm_device *dev,
struct amdgpu_encoder *aencoder,
uint32_t link_index);
static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
static int amdgpu_dm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state,
bool nonblock);
static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct drm_atomic_state *state);
static void handle_cursor_update(struct drm_plane *plane,
struct drm_plane_state *old_plane_state);
/**
* dm_vblank_get_counter() - Get the vertical blank counter for a CRTC
* @adev: [in] desired amdgpu device
* @crtc: [in] index of the CRTC to get the counter from
*
* Return: the counter for vertical blanks, or 0 on error.
*/
static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
{
struct amdgpu_crtc *acrtc;
struct dm_crtc_state *acrtc_state;
if (crtc < 0 || crtc >= adev->mode_info.num_crtc)
return 0;
acrtc = adev->mode_info.crtcs[crtc];
acrtc_state = to_dm_crtc_state(acrtc->base.state);
if (acrtc_state->stream == NULL) {
DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
crtc);
return 0;
}
return dc_stream_get_vblank_counter(acrtc_state->stream);
}
static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
u32 *vbl, u32 *position)
{
uint32_t v_blank_start, v_blank_end, h_position, v_position;
struct amdgpu_crtc *acrtc;
struct dm_crtc_state *acrtc_state;
if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
return -EINVAL;
acrtc = adev->mode_info.crtcs[crtc];
acrtc_state = to_dm_crtc_state(acrtc->base.state);
if (acrtc_state->stream == NULL) {
DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
crtc);
return 0;
}
/*
* TODO rework base driver to use values directly.
* for now parse it back into reg-format
*/
dc_stream_get_scanoutpos(acrtc_state->stream,
&v_blank_start,
&v_blank_end,
&h_position,
&v_position);
*position = v_position | (h_position << 16);
*vbl = v_blank_start | (v_blank_end << 16);
return 0;
}
static bool dm_is_idle(void *handle)
{
/* XXX todo */
return true;
}
static int dm_wait_for_idle(void *handle)
{
/* XXX todo */
return 0;
}
static bool dm_check_soft_reset(void *handle)
{
return false;
}
static int dm_soft_reset(void *handle)
{
/* XXX todo */
return 0;
}
static struct amdgpu_crtc *
get_crtc_by_otg_inst(struct amdgpu_device *adev,
int otg_inst)
{
struct drm_device *dev = adev->ddev;
struct drm_crtc *crtc;
struct amdgpu_crtc *amdgpu_crtc;
if (otg_inst == -1) {
WARN_ON(1);
return adev->mode_info.crtcs[0];
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
amdgpu_crtc = to_amdgpu_crtc(crtc);
if (amdgpu_crtc->otg_inst == otg_inst)
return amdgpu_crtc;
}
return NULL;
}
static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
{
return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
}
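/**
* dm_pflip_high_irq() - Handle pageflip completion interrupts
* @interrupt_params: the &struct common_irq_params registered for this source
*
* Sends the pending vblank event for a completed flip directly when the
* refresh rate is fixed, or when the VRR scanout position is outside the
* front-porch; otherwise the event is queued for send-out from the vblank
* handler once the back-porch starts.
*/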
static void dm_pflip_high_irq(void *interrupt_params)
{
struct amdgpu_crtc *amdgpu_crtc;
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
unsigned long flags;
struct drm_pending_vblank_event *e;
struct dm_crtc_state *acrtc_state;
uint32_t vpos, hpos, v_blank_start, v_blank_end;
bool vrr_active;
amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
/*
* The IRQ can occur during the initial (pre-modeset) stage.
* TODO: work and BO cleanup.
*/
if (amdgpu_crtc == NULL) {
DRM_DEBUG_DRIVER("CRTC is null, returning.\n");
return;
}
spin_lock_irqsave(&adev->ddev->event_lock, flags);
if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) {
DRM_DEBUG_DRIVER("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
amdgpu_crtc->pflip_status,
AMDGPU_FLIP_SUBMITTED,
amdgpu_crtc->crtc_id,
amdgpu_crtc);
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
return;
}
/* page flip completed. */
e = amdgpu_crtc->event;
amdgpu_crtc->event = NULL;
WARN_ON(!e);
acrtc_state = to_dm_crtc_state(amdgpu_crtc->base.state);
vrr_active = amdgpu_dm_vrr_active(acrtc_state);
/* Fixed refresh rate, or VRR scanout position outside front-porch? */
if (!vrr_active ||
!dc_stream_get_scanoutpos(acrtc_state->stream, &v_blank_start,
&v_blank_end, &hpos, &vpos) ||
(vpos < v_blank_start)) {
/* Update to correct count and vblank timestamp if racing with
* vblank irq. This also updates to the correct vblank timestamp
* even in VRR mode, as scanout is past the front-porch atm.
*/
drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
/* Wake up userspace by sending the pageflip event with proper
* count and timestamp of vblank of flip completion.
*/
if (e) {
drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
/* Event sent, so done with vblank for this flip */
drm_crtc_vblank_put(&amdgpu_crtc->base);
}
} else if (e) {
/* VRR active and inside front-porch: vblank count and
* timestamp for pageflip event will only be up to date after
* drm_crtc_handle_vblank() has been executed from late vblank
* irq handler after start of back-porch (vline 0). We queue the
* pageflip event for send-out by drm_crtc_handle_vblank() with
* updated timestamp and count, once it runs after us.
*
* We need to open-code this instead of using the helper
* drm_crtc_arm_vblank_event(), as that helper would
* call drm_crtc_accurate_vblank_count(), which we must
* not call in VRR mode while we are in front-porch!
*/
/* sequence will be replaced by real count during send-out. */
e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
e->pipe = amdgpu_crtc->crtc_id;
list_add_tail(&e->base.link, &adev->ddev->vblank_event_list);
e = NULL;
}
/* Keep track of the vblank count of this flip for flip throttling. We use
* the cooked hw counter, as it is incremented at the start of the vblank
* in which the pageflip completed, so last_flip_vblank is the forbidden
* count for queueing new pageflips when vsync + VRR is enabled.
*/
amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
amdgpu_crtc->crtc_id);
amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
amdgpu_crtc->crtc_id, amdgpu_crtc,
vrr_active, (int) !e);
}
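/**
* dm_vupdate_high_irq() - Handle VUPDATE interrupts
* @interrupt_params: the &struct common_irq_params registered for this source
*
* In VRR mode, core vblank handling is deferred to this point, after the
* end of the front-porch, so that vblank timestamping gives valid results.
* Below-the-range (BTR) processing is also run here for pre-DCE12 ASICs.
*/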
static void dm_vupdate_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_crtc *acrtc;
struct dm_crtc_state *acrtc_state;
unsigned long flags;
acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
if (acrtc) {
acrtc_state = to_dm_crtc_state(acrtc->base.state);
DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
amdgpu_dm_vrr_active(acrtc_state));
/* Core vblank handling is done here after the end of the front-porch in
* VRR mode, as vblank timestamping only gives valid results after the
* front-porch. This will also deliver page-flip completion events that
* were queued to us if a pageflip happened inside the front-porch.
*/
if (amdgpu_dm_vrr_active(acrtc_state)) {
drm_crtc_handle_vblank(&acrtc->base);
/* BTR processing for pre-DCE12 ASICs */
if (acrtc_state->stream &&
adev->family < AMDGPU_FAMILY_AI) {
spin_lock_irqsave(&adev->ddev->event_lock, flags);
mod_freesync_handle_v_update(
adev->dm.freesync_module,
acrtc_state->stream,
&acrtc_state->vrr_params);
dc_stream_adjust_vmin_vmax(
adev->dm.dc,
acrtc_state->stream,
&acrtc_state->vrr_params.adjust);
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
}
}
}
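/**
* dm_crtc_high_irq() - Handle VBLANK interrupts
* @interrupt_params: the &struct common_irq_params registered for this source
*
* Handles core vblank at the start of the front-porch in non-VRR mode,
* CRC capture, and FreeSync vmin/vmax adjustment in VRR mode on newer
* ASICs (family >= AMDGPU_FAMILY_AI).
*/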
static void dm_crtc_high_irq(void *interrupt_params)
{
struct common_irq_params *irq_params = interrupt_params;
struct amdgpu_device *adev = irq_params->adev;
struct amdgpu_crtc *acrtc;
struct dm_crtc_state *acrtc_state;
unsigned long flags;
acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
if (acrtc) {
acrtc_state = to_dm_crtc_state(acrtc->base.state);
DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
amdgpu_dm_vrr_active(acrtc_state));
/* Core vblank handling at the start of the front-porch is only possible
* in non-VRR mode, as only there does vblank timestamping give valid
* results while inside the front-porch. Otherwise defer it to
* dm_vupdate_high_irq after the end of the front-porch.
*/
if (!amdgpu_dm_vrr_active(acrtc_state))
drm_crtc_handle_vblank(&acrtc->base);
/* The following must happen at the start of vblank, for CRC
* computation and below-the-range (BTR) support in VRR mode.
*/
amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
if (acrtc_state->stream && adev->family >= AMDGPU_FAMILY_AI &&
acrtc_state->vrr_params.supported &&
acrtc_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE) {
spin_lock_irqsave(&adev->ddev->event_lock, flags);
mod_freesync_handle_v_update(
adev->dm.freesync_module,
acrtc_state->stream,
&acrtc_state->vrr_params);
dc_stream_adjust_vmin_vmax(
adev->dm.dc,
acrtc_state->stream,
&acrtc_state->vrr_params.adjust);
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
}
}
}
static int dm_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
return 0;
}
static int dm_set_powergating_state(void *handle,
enum amd_powergating_state state)
{
return 0;
}
/* Prototypes of private functions */
static int dm_early_init(void* handle);
/* Allocate memory for FBC compressed data */
static void amdgpu_dm_fbc_init(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = dev->dev_private;
struct dm_comressor_info *compressor = &adev->dm.compressor;
struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
struct drm_display_mode *mode;
unsigned long max_size = 0;
if (adev->dm.dc->fbc_compressor == NULL)
return;
if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
return;
if (compressor->bo_ptr)
return;
list_for_each_entry(mode, &connector->modes, head) {
if (max_size < mode->htotal * mode->vtotal)
max_size = mode->htotal * mode->vtotal;
}
if (max_size) {
int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
&compressor->gpu_addr, &compressor->cpu_addr);
if (r)
DRM_ERROR("DM: Failed to initialize FBC\n");
else {
adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
}
}
}
static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
int pipe, bool *enabled,
unsigned char *buf, int max_bytes)
{
struct drm_device *dev = dev_get_drvdata(kdev);
struct amdgpu_device *adev = dev->dev_private;
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
struct amdgpu_dm_connector *aconnector;
int ret = 0;
*enabled = false;
mutex_lock(&adev->dm.audio_lock);
drm_connector_list_iter_begin(dev, &conn_iter);
drm_for_each_connector_iter(connector, &conn_iter) {
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->audio_inst != port)
continue;
*enabled = true;
ret = drm_eld_size(connector->eld);
memcpy(buf, connector->eld, min(max_bytes, ret));
break;
}
drm_connector_list_iter_end(&conn_iter);
mutex_unlock(&adev->dm.audio_lock);
DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
return ret;
}
static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
.get_eld = amdgpu_dm_audio_component_get_eld,
};
static int amdgpu_dm_audio_component_bind(struct device *kdev,
struct device *hda_kdev, void *data)
{
struct drm_device *dev = dev_get_drvdata(kdev);
struct amdgpu_device *adev = dev->dev_private;
struct drm_audio_component *acomp = data;
acomp->ops = &amdgpu_dm_audio_component_ops;
acomp->dev = kdev;
adev->dm.audio_component = acomp;
return 0;
}
static void amdgpu_dm_audio_component_unbind(struct device *kdev,
struct device *hda_kdev, void *data)
{
struct drm_device *dev = dev_get_drvdata(kdev);
struct amdgpu_device *adev = dev->dev_private;
struct drm_audio_component *acomp = data;
acomp->ops = NULL;
acomp->dev = NULL;
adev->dm.audio_component = NULL;
}
static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
.bind = amdgpu_dm_audio_component_bind,
.unbind = amdgpu_dm_audio_component_unbind,
};
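/*
* Initialize the audio pin info from the DC resource pool and register
* the audio component so the HDA driver can bind and query ELDs.
*/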
static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
{
int i, ret;
if (!amdgpu_audio)
return 0;
adev->mode_info.audio.enabled = true;
adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
adev->mode_info.audio.pin[i].channels = -1;
adev->mode_info.audio.pin[i].rate = -1;
adev->mode_info.audio.pin[i].bits_per_sample = -1;
adev->mode_info.audio.pin[i].status_bits = 0;
adev->mode_info.audio.pin[i].category_code = 0;
adev->mode_info.audio.pin[i].connected = false;
adev->mode_info.audio.pin[i].id =
adev->dm.dc->res_pool->audios[i]->inst;
adev->mode_info.audio.pin[i].offset = 0;
}
ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
if (ret < 0)
return ret;
adev->dm.audio_registered = true;
return 0;
}
static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
{
if (!amdgpu_audio)
return;
if (!adev->mode_info.audio.enabled)
return;
if (adev->dm.audio_registered) {
component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
adev->dm.audio_registered = false;
}
/* TODO: Disable audio? */
adev->mode_info.audio.enabled = false;
}
void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
{
struct drm_audio_component *acomp = adev->dm.audio_component;
if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
pin, -1);
}
}
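/*
* Core DM initialization: create the DC instance from the adapter's
* ASIC/VBIOS data, bring up the FreeSync and color modules, and build
* the DRM device state (planes, CRTCs, connectors, vblank support).
*/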
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
adev->dm.ddev = adev->ddev;
adev->dm.adev = adev;
/* Zero all the fields */
memset(&init_data, 0, sizeof(init_data));
mutex_init(&adev->dm.dc_lock);
mutex_init(&adev->dm.audio_lock);
if (amdgpu_dm_irq_init(adev)) {
DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
goto error;
}
init_data.asic_id.chip_family = adev->family;
init_data.asic_id.pci_revision_id = adev->rev_id;
init_data.asic_id.hw_internal_rev = adev->external_rev_id;
init_data.asic_id.vram_width = adev->gmc.vram_width;
/* TODO: initialize init_data.asic_id.vram_type here!!!! */
init_data.asic_id.atombios_base_address =
adev->mode_info.atom_context->bios;
init_data.driver = adev;
adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
if (!adev->dm.cgs_device) {
DRM_ERROR("amdgpu: failed to create cgs device.\n");
goto error;
}
init_data.cgs_device = adev->dm.cgs_device;
init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
/*
* TODO debug why this doesn't work on Raven
*/
if (adev->flags & AMD_IS_APU &&
adev->asic_type >= CHIP_CARRIZO &&
adev->asic_type <= CHIP_RAVEN)
init_data.flags.gpu_vm_support = true;
if (amdgpu_dc_feature_mask & DC_FBC_MASK)
init_data.flags.fbc_support = true;
if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
init_data.flags.multi_mon_pp_mclk_switch = true;
init_data.flags.power_down_display_on_boot = true;
#ifdef CONFIG_DRM_AMD_DC_DCN2_0
init_data.soc_bounding_box = adev->dm.soc_bounding_box;
#endif
/* Display Core create. */
adev->dm.dc = dc_create(&init_data);
if (adev->dm.dc) {
DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
} else {
DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
goto error;
}
adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
if (!adev->dm.freesync_module) {
DRM_ERROR(
"amdgpu: failed to initialize freesync_module.\n");
} else
DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
adev->dm.freesync_module);
amdgpu_dm_init_color_mod();
if (amdgpu_dm_initialize_drm_device(adev)) {
DRM_ERROR(
"amdgpu: failed to initialize sw for display support.\n");
goto error;
}
/* Update the actual used number of crtc */
adev->mode_info.num_crtc = adev->dm.display_indexes_num;
/* TODO: Add_display_info? */
/* TODO use dynamic cursor width */
adev->ddev->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
adev->ddev->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
if (drm_vblank_init(adev->ddev, adev->dm.display_indexes_num)) {
DRM_ERROR(
"amdgpu: failed to initialize vblank support.\n");
goto error;
}
#if defined(CONFIG_DEBUG_FS)
if (dtn_debugfs_init(adev))
DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
#endif
DRM_DEBUG_DRIVER("KMS initialized.\n");
return 0;
error:
amdgpu_dm_fini(adev);
return -EINVAL;
}
static void amdgpu_dm_fini(struct amdgpu_device *adev)
{
amdgpu_dm_audio_fini(adev);
amdgpu_dm_destroy_drm_device(&adev->dm);
/* DC Destroy TODO: Replace destroy DAL */
if (adev->dm.dc)
dc_destroy(&adev->dm.dc);
/*
* TODO: pageflip, vblank interrupt
*
* amdgpu_dm_irq_fini(adev);
*/
if (adev->dm.cgs_device) {
amdgpu_cgs_destroy_device(adev->dm.cgs_device);
adev->dm.cgs_device = NULL;
}
if (adev->dm.freesync_module) {
mod_freesync_destroy(adev->dm.freesync_module);
adev->dm.freesync_module = NULL;
}
mutex_destroy(&adev->dm.audio_lock);
mutex_destroy(&adev->dm.dc_lock);
}
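/*
* Request, validate, and register the DMCU firmware for ASICs that need
* it (currently only the Raven family), adding its eRAM and
* interrupt-vector regions to the list of ucode loaded through PSP.
*/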
static int load_dmcu_fw(struct amdgpu_device *adev)
{
const char *fw_name_dmcu = NULL;
int r;
const struct dmcu_firmware_header_v1_0 *hdr;
switch (adev->asic_type) {
case CHIP_BONAIRE:
case CHIP_HAWAII:
case CHIP_KAVERI:
case CHIP_KABINI:
case CHIP_MULLINS:
case CHIP_TONGA:
case CHIP_FIJI:
case CHIP_CARRIZO:
case CHIP_STONEY:
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
case CHIP_VEGAM:
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
case CHIP_NAVI10:
case CHIP_NAVI14:
case CHIP_NAVI12:
case CHIP_RENOIR:
return 0;
case CHIP_RAVEN:
if (ASICREV_IS_PICASSO(adev->external_rev_id) ||
ASICREV_IS_RAVEN2(adev->external_rev_id))
fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
else
return 0;
break;
default:
DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
return -EINVAL;
}
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
return 0;
}
r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
if (r == -ENOENT) {
/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
adev->dm.fw_dmcu = NULL;
return 0;
}
if (r) {
dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
fw_name_dmcu);
return r;
}
r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
if (r) {
dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
fw_name_dmcu);
release_firmware(adev->dm.fw_dmcu);
adev->dm.fw_dmcu = NULL;
return r;
}
hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
adev->firmware.fw_size +=
ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
return 0;
}
static int dm_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
return load_dmcu_fw(adev);
}
static int dm_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->dm.fw_dmcu) {
release_firmware(adev->dm.fw_dmcu);
adev->dm.fw_dmcu = NULL;
}
return 0;
}
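/*
* Start MST topology management on every connector whose link is an MST
* branch device with a usable AUX channel.
*/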
static int detect_mst_link_for_all_connectors(struct drm_device *dev)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
int ret = 0;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type == dc_connection_mst_branch &&
aconnector->mst_mgr.aux) {
DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
aconnector, aconnector->base.base.id);
ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
if (ret < 0) {
DRM_ERROR("DM_MST: Failed to start MST\n");
((struct dc_link *)aconnector->dc_link)->type = dc_connection_single;
return ret;
}
}
}
drm_modeset_unlock(&dev->mode_config.connection_mutex);
return ret;
}
static int dm_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct dmcu_iram_parameters params;
unsigned int linear_lut[16];
int i;
struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
bool ret = false;
for (i = 0; i < 16; i++)
linear_lut[i] = 0xFFFF * i / 15;
params.set = 0;
params.backlight_ramping_start = 0xCCCC;
params.backlight_ramping_reduction = 0xCCCCCCCC;
params.backlight_lut_array_size = 16;
params.backlight_lut_array = linear_lut;
/* todo will enable for navi10 */
if (adev->asic_type <= CHIP_RAVEN) {
ret = dmcu_load_iram(dmcu, params);
if (!ret)
return -EINVAL;
}
return detect_mst_link_for_all_connectors(adev->ddev);
}
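/*
* Suspend or resume the MST topology manager of every root MST connector.
* If a manager fails to resume, MST is torn down on that link and a
* hotplug event is sent so userspace can re-probe.
*/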
static void s3_handle_mst(struct drm_device *dev, bool suspend)
{
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
struct drm_dp_mst_topology_mgr *mgr;
int ret;
bool need_hotplug = false;
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
aconnector = to_amdgpu_dm_connector(connector);
if (aconnector->dc_link->type != dc_connection_mst_branch ||
aconnector->mst_port)
continue;
mgr = &aconnector->mst_mgr;
if (suspend) {
drm_dp_mst_topology_mgr_suspend(mgr);
} else {
ret = drm_dp_mst_topology_mgr_resume(mgr);
if (ret < 0) {
drm_dp_mst_topology_mgr_set_mst(mgr, false);
need_hotplug = true;
}
}
}
drm_modeset_unlock(&dev->mode_config.connection_mutex);
if (need_hotplug)
drm_kms_helper_hotplug_event(dev);
}
/**
* dm_hw_init() - Initialize DC device
* @handle: The base driver device containing the amdgpu_dm device.
*
* Initialize the &struct amdgpu_display_manager device. This involves calling
* the initializers of each DM component, then populating the struct with them.
*
* Although the function implies hardware initialization, both hardware and
* software are initialized here. Splitting them out to their relevant init
* hooks is a future TODO item.
*
* Some notable things that are initialized here:
*
* - Display Core, both software and hardware
* - DC modules that we need (freesync and color management)
* - DRM software states
* - Interrupt sources and handlers
* - Vblank support
* - Debug FS entries, if enabled
*/
static int dm_hw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* Create DAL display manager */
amdgpu_dm_init(adev);
amdgpu_dm_hpd_init(adev);
return 0;
}
/**
* dm_hw_fini() - Teardown DC device
* @handle: The base driver device containing the amdgpu_dm device.
*
* Teardown components within &struct amdgpu_display_manager that require
* cleanup. This involves cleaning up the DRM device, DC, and any modules that
* were loaded. Also flush IRQ workqueues and disable them.
*/
static int dm_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_dm_hpd_fini(adev);
amdgpu_dm_irq_fini(adev);
amdgpu_dm_fini(adev);
return 0;
}
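/*
* Cache the atomic state, suspend MST and DM interrupts, and put DC into
* the D3 power state. The cached state is replayed in dm_resume().
*/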
static int dm_suspend(void *handle)
{
struct amdgpu_device *adev = handle;
struct amdgpu_display_manager *dm = &adev->dm;
int ret = 0;
WARN_ON(adev->dm.cached_state);
adev->dm.cached_state = drm_atomic_helper_suspend(adev->ddev);
s3_handle_mst(adev->ddev, true);
amdgpu_dm_irq_suspend(adev);
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
return ret;
}
static struct amdgpu_dm_connector *
amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
uint32_t i;
struct drm_connector_state *new_con_state;
struct drm_connector *connector;
struct drm_crtc *crtc_from_state;
for_each_new_connector_in_state(state, connector, new_con_state, i) {
crtc_from_state = new_con_state->crtc;
if (crtc_from_state == crtc)
return to_amdgpu_dm_connector(connector);
}
return NULL;
}
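/*
* Emulate sink detection for a forced connector: mark the link as
* disconnected, create a virtual dc_sink matching the connector signal,
* and try to read a locally stored EDID for it.
*/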
static void emulated_link_detect(struct dc_link *link)
{
struct dc_sink_init_data sink_init_data = { 0 };
struct display_sink_capability sink_caps = { 0 };
enum dc_edid_status edid_status;
struct dc_context *dc_ctx = link->ctx;
struct dc_sink *sink = NULL;
struct dc_sink *prev_sink = NULL;
link->type = dc_connection_none;
prev_sink = link->local_sink;
if (prev_sink != NULL)
dc_sink_retain(prev_sink);
switch (link->connector_signal) {
case SIGNAL_TYPE_HDMI_TYPE_A: {
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
break;
}
case SIGNAL_TYPE_DVI_SINGLE_LINK: {
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
break;
}
case SIGNAL_TYPE_DVI_DUAL_LINK: {
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
break;
}
case SIGNAL_TYPE_LVDS: {
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
sink_caps.signal = SIGNAL_TYPE_LVDS;
break;
}
case SIGNAL_TYPE_EDP: {
sink_caps.transaction_type =
DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
sink_caps.signal = SIGNAL_TYPE_EDP;
break;
}
case SIGNAL_TYPE_DISPLAY_PORT: {
sink_caps.transaction_type =
DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
break;
}
default:
DC_ERROR("Invalid connector type! signal:%d\n",
link->connector_signal);
return;
}
sink_init_data.link = link;
sink_init_data.sink_signal = sink_caps.signal;
sink = dc_sink_create(&sink_init_data);
if (!sink) {
DC_ERROR("Failed to create sink!\n");
return;
}
/* dc_sink_create returns a new reference */
link->local_sink = sink;
edid_status = dm_helpers_read_local_edid(
link->ctx,
link,
sink);
if (edid_status != EDID_OK)
DC_ERROR("Failed to read EDID");
}
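/*
* Undo dm_suspend(): recreate the dc_state, power DC back up to D0,
* resume MST and interrupts, re-run sink detection on all connectors,
* and replay the cached atomic state with a forced mode set.
*/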
static int dm_resume(void *handle)
{
struct amdgpu_device *adev = handle;
struct drm_device *ddev = adev->ddev;
struct amdgpu_display_manager *dm = &adev->dm;
struct amdgpu_dm_connector *aconnector;
struct drm_connector *connector;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
struct dm_crtc_state *dm_new_crtc_state;
struct drm_plane *plane;
struct drm_plane_state *new_plane_state;
struct dm_plane_state *dm_new_plane_state;
struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
enum dc_connection_type new_connection_type = dc_connection_none;
int i;
/* Recreate dc_state - DC invalidates it when setting power state to S3. */
dc_release_state(dm_state->context);
dm_state->context = dc_create_state(dm->dc);
/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
dc_resource_state_construct(dm->dc, dm_state->context);
/* power on hardware */
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
/* program HPD filter */
dc_resume(dm->dc);
/* On resume we need to rewrite the MSTM control bits to enable MST */
s3_handle_mst(ddev, false);
/*
* early enable HPD Rx IRQ, should be done before set mode as short
* pulse interrupts are used for MST
*/
amdgpu_dm_irq_resume_early(adev);
/* Do detection*/
list_for_each_entry(connector, &ddev->mode_config.connector_list, head) {
aconnector = to_amdgpu_dm_connector(connector);
/*
* This is the case when traversing through already created
* MST connectors; they should be skipped.
*/
if (aconnector->mst_port)
continue;
mutex_lock(&aconnector->hpd_lock);
if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none)
emulated_link_detect(aconnector->dc_link);
else
dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
if (aconnector->fake_enable && aconnector->dc_link->local_sink)
aconnector->fake_enable = false;
if (aconnector->dc_sink)
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
amdgpu_dm_update_connector_after_detect(aconnector);
mutex_unlock(&aconnector->hpd_lock);
}
/* Force mode set in atomic commit */
for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
new_crtc_state->active_changed = true;
/*
* atomic_check is expected to create the dc states. We need to release
* them here, since they were duplicated as part of the suspend
* procedure.
*/
for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
if (dm_new_crtc_state->stream) {
WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
dc_stream_release(dm_new_crtc_state->stream);
dm_new_crtc_state->stream = NULL;
}
}
for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
dm_new_plane_state = to_dm_plane_state(new_plane_state);
if (dm_new_plane_state->dc_state) {
WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
dc_plane_state_release(dm_new_plane_state->dc_state);
dm_new_plane_state->dc_state = NULL;
}
}
drm_atomic_helper_resume(ddev, dm->cached_state);
dm->cached_state = NULL;
amdgpu_dm_irq_resume_late(adev);
return 0;
}
/**
* DOC: DM Lifecycle
*
* DM (and consequently DC) is registered in the amdgpu base driver as a IP
* block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
* the base driver's device list to be initialized and torn down accordingly.
*
* The functions to do so are provided as hooks in &struct amd_ip_funcs.
*/
static const struct amd_ip_funcs amdgpu_dm_funcs = {
.name = "dm",
.early_init = dm_early_init,
.late_init = dm_late_init,
.sw_init = dm_sw_init,
.sw_fini = dm_sw_fini,
.hw_init = dm_hw_init,
.hw_fini = dm_hw_fini,
.suspend = dm_suspend,
.resume = dm_resume,
.is_idle = dm_is_idle,
.wait_for_idle = dm_wait_for_idle,
.check_soft_reset = dm_check_soft_reset,
.soft_reset = dm_soft_reset,
.set_clockgating_state = dm_set_clockgating_state,
.set_powergating_state = dm_set_powergating_state,
};
const struct amdgpu_ip_block_version dm_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_DCE,
.major = 1,
.minor = 0,
.rev = 0,
.funcs = &amdgpu_dm_funcs,
};
/**
* DOC: atomic
*
* *WIP*
*/
static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
.fb_create = amdgpu_display_user_framebuffer_create,
.output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = amdgpu_dm_atomic_check,
.atomic_commit = amdgpu_dm_atomic_commit,
};
static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
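/*
* Synchronize the drm connector with the result of a DC link detection:
* adopt the newly detected (or emulated) dc_sink, refresh the EDID
* property and FreeSync capabilities, and drop the reference to the
* previous sink.
*/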
static void
amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
struct dc_sink *sink;
/* MST handled by drm_mst framework */
if (aconnector->mst_mgr.mst_state)
return;
sink = aconnector->dc_link->local_sink;
if (sink)
dc_sink_retain(sink);
/*
* EDID-managed connectors get their first update only in the mode_valid
* hook; the connector sink is then set to either the fake or the physical
* sink, depending on link status. Skip if already done during boot.
*/
if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
&& aconnector->dc_em_sink) {
/*
* For headless S3 resume, use the emulated sink (dc_em_sink) to fake
* a stream, because the connector's dc_sink is set to NULL on resume.
*/
mutex_lock(&dev->mode_config.mutex);
if (sink) {
if (aconnector->dc_sink) {
amdgpu_dm_update_freesync_caps(connector, NULL);
/*
* The retain and release below bump up the sink's refcount,
* because the link no longer points to it after disconnect;
* otherwise the next crtc-to-connector reshuffle by the UMD
* would trigger an unwanted dc_sink release.
*/
dc_sink_release(aconnector->dc_sink);
}
aconnector->dc_sink = sink;
dc_sink_retain(aconnector->dc_sink);
amdgpu_dm_update_freesync_caps(connector,
aconnector->edid);
} else {
amdgpu_dm_update_freesync_caps(connector, NULL);
if (!aconnector->dc_sink) {
aconnector->dc_sink = aconnector->dc_em_sink;
dc_sink_retain(aconnector->dc_sink);
}
}
mutex_unlock(&dev->mode_config.mutex);
if (sink)
dc_sink_release(sink);
return;
}
/*
* TODO: temporary guard while looking for a proper fix.
* If this sink is an MST sink, we should not do anything.
*/
if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
dc_sink_release(sink);
return;
}
if (aconnector->dc_sink == sink) {
/*
* We got a DP short pulse (Link Loss, DP CTS, etc...).
* Do nothing!!
*/
DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
aconnector->connector_id);
if (sink)
dc_sink_release(sink);
return;
}
DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
aconnector->connector_id, aconnector->dc_sink, sink);
mutex_lock(&dev->mode_config.mutex);
/*
* 1. Update status of the drm connector
* 2. Send an event and let userspace tell us what to do
*/
if (sink) {
/*
* TODO: check if we still need the S3 mode update workaround.
* If yes, put it here.
*/
if (aconnector->dc_sink)
amdgpu_dm_update_freesync_caps(connector, NULL);
aconnector->dc_sink = sink;
dc_sink_retain(aconnector->dc_sink);
if (sink->dc_edid.length == 0) {
aconnector->edid = NULL;
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
} else {
aconnector->edid =
(struct edid *) sink->dc_edid.raw_edid;
drm_connector_update_edid_property(connector,
aconnector->edid);
drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
aconnector->edid);
}
amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
} else {
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
amdgpu_dm_update_freesync_caps(connector, NULL);
drm_connector_update_edid_property(connector, NULL);
aconnector->num_modes = 0;
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
}
mutex_unlock(&dev->mode_config.mutex);
if (sink)
dc_sink_release(sink);
}
static void handle_hpd_irq(void *param)
{
struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
/*
* In case of failure or for MST there is no need to update the connector
* status or notify the OS, since (in the MST case) MST does this in its
* own context.
*/
mutex_lock(&aconnector->hpd_lock);
if (aconnector->fake_enable)
aconnector->fake_enable = false;
if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(aconnector->dc_link);
drm_modeset_lock_all(dev);
dm_restore_drm_connector_state(dev, connector);
drm_modeset_unlock_all(dev);
if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
drm_kms_helper_hotplug_event(dev);
} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
amdgpu_dm_update_connector_after_detect(aconnector);
drm_modeset_lock_all(dev);
dm_restore_drm_connector_state(dev, connector);
drm_modeset_unlock_all(dev);
if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
drm_kms_helper_hotplug_event(dev);
}
mutex_unlock(&aconnector->hpd_lock);
}
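/*
* Service a DP short pulse for MST: read the DPCD sink-count/ESI
* registers, let the MST manager process the IRQ, write back an ACK,
* and loop while new IRQs are signalled, bounded by max_process_count.
*/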
static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
{
uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
uint8_t dret;
bool new_irq_handled = false;
int dpcd_addr;
int dpcd_bytes_to_read;
const int max_process_count = 30;
int process_count = 0;
const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
/* DPCD 0x200 - 0x201 for downstream IRQ */
dpcd_addr = DP_SINK_COUNT;
} else {
dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
/* DPCD 0x2002 - 0x2005 for downstream IRQ */
dpcd_addr = DP_SINK_COUNT_ESI;
}
dret = drm_dp_dpcd_read(
&aconnector->dm_dp_aux.aux,
dpcd_addr,
esi,
dpcd_bytes_to_read);
while (dret == dpcd_bytes_to_read &&
process_count < max_process_count) {
uint8_t retry;
dret = 0;
process_count++;
DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
/* handle HPD short pulse irq */
if (aconnector->mst_mgr.mst_state)
drm_dp_mst_hpd_irq(
&aconnector->mst_mgr,
esi,
&new_irq_handled);
if (new_irq_handled) {
/* ACK via DPCD to notify the downstream device */
const int ack_dpcd_bytes_to_write =
dpcd_bytes_to_read - 1;
for (retry = 0; retry < 3; retry++) {
uint8_t wret;
wret = drm_dp_dpcd_write(
&aconnector->dm_dp_aux.aux,
dpcd_addr + 1,
&esi[1],
ack_dpcd_bytes_to_write);
if (wret == ack_dpcd_bytes_to_write)
break;
}
/* check if there is new irq to be handled */
dret = drm_dp_dpcd_read(
&aconnector->dm_dp_aux.aux,
dpcd_addr,
esi,
dpcd_bytes_to_read);
new_irq_handled = false;
} else {
break;
}
}
if (process_count == max_process_count)
DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
static void handle_hpd_rx_irq(void *param)
{
struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
struct dc_link *dc_link = aconnector->dc_link;
bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
enum dc_connection_type new_connection_type = dc_connection_none;
/*
* TODO: This mutex is a temporary measure to keep the HPD interrupt
* from running into a GPIO conflict; once an i2c helper is
* implemented, it should be retired.
*/
if (dc_link->type != dc_connection_mst_branch)
mutex_lock(&aconnector->hpd_lock);
if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
!is_mst_root_connector) {
/* Downstream Port status changed. */
if (!dc_link_detect_sink(dc_link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(dc_link);
if (aconnector->fake_enable)
aconnector->fake_enable = false;
amdgpu_dm_update_connector_after_detect(aconnector);
drm_modeset_lock_all(dev);
dm_restore_drm_connector_state(dev, connector);
drm_modeset_unlock_all(dev);
drm_kms_helper_hotplug_event(dev);
} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
if (aconnector->fake_enable)
aconnector->fake_enable = false;
amdgpu_dm_update_connector_after_detect(aconnector);
drm_modeset_lock_all(dev);
dm_restore_drm_connector_state(dev, connector);
drm_modeset_unlock_all(dev);
drm_kms_helper_hotplug_event(dev);
}
}
if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
(dc_link->type == dc_connection_mst_branch))
dm_handle_hpd_rx_irq(aconnector);
if (dc_link->type != dc_connection_mst_branch) {
drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
mutex_unlock(&aconnector->hpd_lock);
}
}
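/*
* Register DM low-context handlers for the HPD and HPD RX (DP short
* pulse) interrupt sources of every connector on the board.
*/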
static void register_hpd_handlers(struct amdgpu_device *adev)
{
struct drm_device *dev = adev->ddev;
struct drm_connector *connector;
struct amdgpu_dm_connector *aconnector;
const struct dc_link *dc_link;
struct dc_interrupt_params int_params = {0};
int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
list_for_each_entry(connector,
&dev->mode_config.connector_list, head) {
aconnector = to_amdgpu_dm_connector(connector);
dc_link = aconnector->dc_link;
if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
int_params.irq_source = dc_link->irq_source_hpd;
amdgpu_dm_irq_register_interrupt(adev, &int_params,
handle_hpd_irq,
(void *) aconnector);
}
if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
/* Also register for DP short pulse (hpd_rx). */
int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
int_params.irq_source = dc_link->irq_source_hpd_rx;
amdgpu_dm_irq_register_interrupt(adev, &int_params,
handle_hpd_rx_irq,
(void *) aconnector);
}
}
}
/* Register IRQ sources and initialize IRQ callbacks */
static int dce110_register_irq_handlers(struct amdgpu_device *adev)
{
struct dc *dc = adev->dm.dc;
struct common_irq_params *c_irq_params;
struct dc_interrupt_params int_params = {0};
int r;
int i;
unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
if (adev->asic_type >= CHIP_VEGA10)
client_id = SOC15_IH_CLIENTID_DCE;
int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
/*
* Actions of amdgpu_irq_add_id():
* 1. Register a set() function with base driver.
* Base driver will call set() function to enable/disable an
* interrupt in DC hardware.
* 2. Register amdgpu_dm_irq_handler().
* Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
* coming from DC hardware.
* amdgpu_dm_irq_handler() will re-direct the interrupt to DC
* for acknowledging and handling.
*/
/* Use VBLANK interrupt */
for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
if (r) {
DRM_ERROR("Failed to add crtc irq id!\n");
return r;
}
int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
int_params.irq_source =
dc_interrupt_to_irq_source(dc, i, 0);
c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
amdgpu_dm_irq_register_interrupt(adev, &int_params,
dm_crtc_high_irq, c_irq_params);
}
/* Use VUPDATE interrupt */
for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
if (r) {
DRM_ERROR("Failed to add vupdate irq id!\n");
return r;
}
int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
int_params.irq_source =
dc_interrupt_to_irq_source(dc, i, 0);
c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
amdgpu_dm_irq_register_interrupt(adev, &int_params,
dm_vupdate_high_irq, c_irq_params);
}
/* Use GRPH_PFLIP interrupt */
for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
if (r) {
DRM_ERROR("Failed to add page flip irq id!\n");
return r;
}
int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
int_params.irq_source =
dc_interrupt_to_irq_source(dc, i, 0);
c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
amdgpu_dm_irq_register_interrupt(adev, &int_params,
dm_pflip_high_irq, c_irq_params);
}
/* HPD */
r = amdgpu_irq_add_id(adev, client_id,
VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
if (r) {
DRM_ERROR("Failed to add hpd irq id!\n");
return r;
}
register_hpd_handlers(adev);
return 0;
}
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
struct dc *dc = adev->dm.dc;
struct common_irq_params *c_irq_params;
struct dc_interrupt_params int_params = {0};
int r;
int i;
int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
/*
* Actions of amdgpu_irq_add_id():
* 1. Register a set() function with base driver.
* Base driver will call set() function to enable/disable an
* interrupt in DC hardware.
* 2. Register amdgpu_dm_irq_handler().
* Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
* coming from DC hardware.
* amdgpu_dm_irq_handler() will re-direct the interrupt to DC
* for acknowledging and handling.
*/
/* Use VSTARTUP interrupt */
for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
i++) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
if (r) {
DRM_ERROR("Failed to add crtc irq id!\n");
return r;
}
int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
int_params.irq_source =
dc_interrupt_to_irq_source(dc, i, 0);
c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
amdgpu_dm_irq_register_interrupt(adev, &int_params,
dm_crtc_high_irq, c_irq_params);
}
/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
* the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
* to trigger at end of each vblank, regardless of state of the lock,
* matching DCE behaviour.
*/
for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
i++) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
if (r) {
DRM_ERROR("Failed to add vupdate irq id!\n");
return r;
}
int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
int_params.irq_source =
dc_interrupt_to_irq_source(dc, i, 0);
c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
amdgpu_dm_irq_register_interrupt(adev, &int_params,
dm_vupdate_high_irq, c_irq_params);
}
/* Use GRPH_PFLIP interrupt */
for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + adev->mode_info.num_crtc - 1;
i++) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
if (r) {
DRM_ERROR("Failed to add page flip irq id!\n");
return r;
}
int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
int_params.irq_source =
dc_interrupt_to_irq_source(dc, i, 0);
c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
c_irq_params->adev = adev;
c_irq_params->irq_src = int_params.irq_source;
amdgpu_dm_irq_register_interrupt(adev, &int_params,
dm_pflip_high_irq, c_irq_params);
}
/* HPD */
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
&adev->hpd_irq);
if (r) {
DRM_ERROR("Failed to add hpd irq id!\n");
return r;
}
register_hpd_handlers(adev);
return 0;
}
#endif
/*
* Acquires the lock for the atomic state object and returns
* the new atomic state.
*
* This should only be called during atomic check.
*/
static int dm_atomic_get_state(struct drm_atomic_state *state,
struct dm_atomic_state **dm_state)
{
struct drm_device *dev = state->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_display_manager *dm = &adev->dm;
struct drm_private_state *priv_state;
if (*dm_state)
return 0;
priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
if (IS_ERR(priv_state))
return PTR_ERR(priv_state);
*dm_state = to_dm_atomic_state(priv_state);
return 0;
}
struct dm_atomic_state *
dm_atomic_get_new_state(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_display_manager *dm = &adev->dm;
struct drm_private_obj *obj;
struct drm_private_state *new_obj_state;
int i;
for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
if (obj->funcs == dm->atomic_obj.funcs)
return to_dm_atomic_state(new_obj_state);
}
return NULL;
}
struct dm_atomic_state *
dm_atomic_get_old_state(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_display_manager *dm = &adev->dm;
struct drm_private_obj *obj;
struct drm_private_state *old_obj_state;
int i;
for_each_old_private_obj_in_state(state, obj, old_obj_state, i) {
if (obj->funcs == dm->atomic_obj.funcs)
return to_dm_atomic_state(old_obj_state);
}
return NULL;
}
static struct drm_private_state *
dm_atomic_duplicate_state(struct drm_private_obj *obj)
{
struct dm_atomic_state *old_state, *new_state;
new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
if (!new_state)
return NULL;
__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
old_state = to_dm_atomic_state(obj->state);
if (old_state && old_state->context)
new_state->context = dc_copy_state(old_state->context);
if (!new_state->context) {
kfree(new_state);
return NULL;
}
return &new_state->base;
}
static void dm_atomic_destroy_state(struct drm_private_obj *obj,
struct drm_private_state *state)
{
struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
if (dm_state && dm_state->context)
dc_release_state(dm_state->context);
kfree(dm_state);
}
static struct drm_private_state_funcs dm_atomic_state_funcs = {
.atomic_duplicate_state = dm_atomic_duplicate_state,
.atomic_destroy_state = dm_atomic_destroy_state,
};
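/*
* Set up the DRM mode_config limits and function tables, wrap the global
* dc_state in a DRM private object for atomic tracking, and create the
* modeset properties and audio component.
*/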
static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
{
struct dm_atomic_state *state;
int r;
adev->mode_info.mode_config_initialized = true;
adev->ddev->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
adev->ddev->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
adev->ddev->mode_config.max_width = 16384;
adev->ddev->mode_config.max_height = 16384;
adev->ddev->mode_config.preferred_depth = 24;
adev->ddev->mode_config.prefer_shadow = 1;
/* indicates support for immediate flip */
adev->ddev->mode_config.async_page_flip = true;
adev->ddev->mode_config.fb_base = adev->gmc.aper_base;
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
state->context = dc_create_state(adev->dm.dc);
if (!state->context) {
kfree(state);
return -ENOMEM;
}
dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
drm_atomic_private_obj_init(adev->ddev,
&adev->dm.atomic_obj,
&state->base,
&dm_atomic_state_funcs);
r = amdgpu_display_modeset_create_props(adev);
if (r)
return r;
r = amdgpu_dm_audio_init(adev);
if (r)
return r;
return 0;
}
#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
{
#if defined(CONFIG_ACPI)
struct amdgpu_dm_backlight_caps caps;
if (dm->backlight_caps.caps_valid)
return;
amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
if (caps.caps_valid) {
dm->backlight_caps.min_input_signal = caps.min_input_signal;
dm->backlight_caps.max_input_signal = caps.max_input_signal;
dm->backlight_caps.caps_valid = true;
} else {
dm->backlight_caps.min_input_signal =
AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
dm->backlight_caps.max_input_signal =
AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
}
#else
dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
struct amdgpu_display_manager *dm = bl_get_data(bd);
struct amdgpu_dm_backlight_caps caps;
uint32_t brightness = bd->props.brightness;
amdgpu_dm_update_backlight_caps(dm);
caps = dm->backlight_caps;
/*
* The brightness input is in the range 0-255
* It needs to be rescaled to be between the
* requested min and max input signal
*
* It also needs to be scaled up by 0x101 to
* match the DC interface which has a range of
* 0 to 0xffff
*/
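/*
* For example, with the default caps (min 12, max 255), and assuming
* AMDGPU_MAX_BL_LEVEL is 255: an input of 255 maps to
* 255 * 0x101 * 243 / 255 + 12 * 0x101 = 0xFFFF, and an input of 0
* maps to 12 * 0x101 = 0xC0C.
*/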
brightness =
brightness
* 0x101
* (caps.max_input_signal - caps.min_input_signal)
/ AMDGPU_MAX_BL_LEVEL
+ caps.min_input_signal * 0x101;
if (dc_link_set_backlight_level(dm->backlight_link,
brightness, 0))
return 0;
else
return 1;
}
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
struct amdgpu_display_manager *dm = bl_get_data(bd);
int ret = dc_link_get_backlight_level(dm->backlight_link);
if (ret == DC_ERROR_UNEXPECTED)
return bd->props.brightness;
return ret;
}
static const struct backlight_ops amdgpu_dm_backlight_ops = {
.options = BL_CORE_SUSPENDRESUME,
.get_brightness = amdgpu_dm_backlight_get_brightness,
.update_status = amdgpu_dm_backlight_update_status,
};
static void
amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
{
char bl_name[16];
struct backlight_properties props = { 0 };
amdgpu_dm_update_backlight_caps(dm);
props.max_brightness = AMDGPU_MAX_BL_LEVEL;
props.brightness = AMDGPU_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
dm->adev->ddev->primary->index);
dm->backlight_dev = backlight_device_register(bl_name,
dm->adev->ddev->dev,
dm,
&amdgpu_dm_backlight_ops,
&props);
if (IS_ERR(dm->backlight_dev))
DRM_ERROR("DM: Backlight registration failed!\n");
else
DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
}
#endif
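/*
* Allocate a drm_plane of the given type and initialize it through
* amdgpu_dm_plane_init(). Primary planes are tied to their matching CRTC;
* overlay/underlay planes may attach to any CRTC (see the HACK note below).
*/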
static int initialize_plane(struct amdgpu_display_manager *dm,
struct amdgpu_mode_info *mode_info, int plane_id,
enum drm_plane_type plane_type,
const struct dc_plane_cap *plane_cap)
{
struct drm_plane *plane;
unsigned long possible_crtcs;
int ret = 0;
plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
if (!plane) {
DRM_ERROR("KMS: Failed to allocate plane\n");
return -ENOMEM;
}
plane->type = plane_type;
/*
* HACK: IGT tests expect that the primary plane for a CRTC
* can only have one possible CRTC. Only expose support for
* any CRTC if they're not going to be used as a primary plane
* for a CRTC - like overlay or underlay planes.
*/
possible_crtcs = 1 << plane_id;
if (plane_id >= dm->dc->caps.max_streams)
possible_crtcs = 0xff;
ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
if (ret) {
DRM_ERROR("KMS: Failed to initialize plane\n");
kfree(plane);
return ret;
}
if (mode_info)
mode_info->planes[plane_id] = plane;
return ret;
}
static void register_backlight_device(struct amdgpu_display_manager *dm,
struct dc_link *link)
{
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
link->type != dc_connection_none) {
/*
* Even if registration fails, we should continue with
* DM initialization, because not having backlight control
* is better than a black screen.
*/
amdgpu_dm_register_backlight_device(dm);
if (dm->backlight_dev)
dm->backlight_link = link;
}
#endif
}
/*
* In this architecture, the association
* connector -> encoder -> crtc
* is not really required. The crtc and connector will hold the
* display_index as an abstraction to use with the DAL component.
*
* Returns 0 on success
*/
static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
{
struct amdgpu_display_manager *dm = &adev->dm;
int32_t i;
struct amdgpu_dm_connector *aconnector = NULL;
struct amdgpu_encoder *aencoder = NULL;
struct amdgpu_mode_info *mode_info = &adev->mode_info;
uint32_t link_cnt;
int32_t primary_planes;
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
link_cnt = dm->dc->caps.max_links;
if (amdgpu_dm_mode_config_init(dm->adev)) {
DRM_ERROR("DM: Failed to initialize mode config\n");
return -EINVAL;
}
/* There is one primary plane per CRTC */
primary_planes = dm->dc->caps.max_streams;
ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
/*
* Initialize primary planes, implicit planes for legacy IOCTLs.
* Order is reversed to match iteration order in atomic check.
*/
for (i = (primary_planes - 1); i >= 0; i--) {
plane = &dm->dc->caps.planes[i];
if (initialize_plane(dm, mode_info, i,
DRM_PLANE_TYPE_PRIMARY, plane)) {
DRM_ERROR("KMS: Failed to initialize primary plane\n");
goto fail;
}
}
/*
* Initialize overlay planes, index starting after primary planes.
* These planes have a higher DRM index than the primary planes since
* they should be considered as having a higher z-order.
* Order is reversed to match iteration order in atomic check.
*
* Only support DCN for now, and only expose one so we don't encourage
* userspace to use up all the pipes.
*/
for (i = 0; i < dm->dc->caps.max_planes; ++i) {
struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
continue;
if (!plane->blends_with_above || !plane->blends_with_below)
continue;
if (!plane->pixel_format_support.argb8888)
continue;
if (initialize_plane(dm, NULL, primary_planes + i,
DRM_PLANE_TYPE_OVERLAY, plane)) {
DRM_ERROR("KMS: Failed to initialize overlay plane\n");
goto fail;
}
/* Only create one overlay plane. */
break;
}
for (i = 0; i < dm->dc->caps.max_streams; i++)
if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
DRM_ERROR("KMS: Failed to initialize crtc\n");
goto fail;
}
dm->display_indexes_num = dm->dc->caps.max_streams;
/* Loop over all connectors on the board. */
for (i = 0; i < link_cnt; i++) {
struct dc_link *link = NULL;
if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
DRM_ERROR(
"KMS: Cannot support more than %d display indexes\n",
AMDGPU_DM_MAX_DISPLAY_INDEX);
continue;
}
aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
if (!aconnector)
goto fail;
aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
if (!aencoder)
goto fail;
if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
DRM_ERROR("KMS: Failed to initialize encoder\n");
goto fail;
}
if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
DRM_ERROR("KMS: Failed to initialize connector\n");
goto fail;
}
link = dc_get_link_at_index(dm->dc, i);
if (!dc_link_detect_sink(link, &new_connection_type))
DRM_ERROR("KMS: Failed to detect connector\n");
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(link);
amdgpu_dm_update_connector_after_detect(aconnector);
} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
amdgpu_dm_update_connector_after_detect(aconnector);
register_backlight_device(dm, link);
}
}
/* Software is initialized. Now we can register interrupt handlers. */
switch (adev->asic_type) {
case CHIP_BONAIRE:
case CHIP_HAWAII:
case CHIP_KAVERI:
case CHIP_KABINI:
case CHIP_MULLINS:
case CHIP_TONGA:
case CHIP_FIJI:
case CHIP_CARRIZO:
case CHIP_STONEY:
case CHIP_POLARIS11:
case CHIP_POLARIS10:
case CHIP_POLARIS12:
case CHIP_VEGAM:
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
if (dce110_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
}
break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case CHIP_RAVEN:
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case CHIP_NAVI12:
case CHIP_NAVI10:
case CHIP_NAVI14:
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
case CHIP_RENOIR:
#endif
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
}
break;
#endif
default:
DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
goto fail;
}
if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
dm->dc->debug.disable_stutter =
!(amdgpu_pp_feature_mask & PP_STUTTER_MODE);
return 0;
fail:
kfree(aencoder);
kfree(aconnector);
return -EINVAL;
}
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
{
drm_mode_config_cleanup(dm->ddev);
drm_atomic_private_obj_fini(&dm->atomic_obj);
}
/******************************************************************************
* amdgpu_display_funcs functions
*****************************************************************************/
/*
* dm_bandwidth_update - program display watermarks
*
* @adev: amdgpu_device pointer
*
* Calculate and program the display watermarks and line buffer allocation.
*/
static void dm_bandwidth_update(struct amdgpu_device *adev)
{
/* TODO: implement later */
}
static const struct amdgpu_display_funcs dm_display_funcs = {
.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
.backlight_set_level = NULL, /* never called for DC */
.backlight_get_level = NULL, /* never called for DC */
.hpd_sense = NULL,/* called unconditionally */
.hpd_set_polarity = NULL, /* called unconditionally */
.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
.page_flip_get_scanoutpos =
dm_crtc_get_scanoutpos,/* called unconditionally */
.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
.add_connector = NULL, /* VBIOS parsing. DAL does it. */
};
#if defined(CONFIG_DEBUG_KERNEL_DC)
static ssize_t s3_debug_store(struct device *device,
struct device_attribute *attr,
const char *buf,
size_t count)
{
int ret;
int s3_state;
struct drm_device *drm_dev = dev_get_drvdata(device);
struct amdgpu_device *adev = drm_dev->dev_private;
ret = kstrtoint(buf, 0, &s3_state);
if (ret == 0) {
if (s3_state) {
dm_resume(adev);
drm_kms_helper_hotplug_event(adev->ddev);
} else
dm_suspend(adev);
}
return ret == 0 ? count : 0;
}
DEVICE_ATTR_WO(s3_debug);
#endif
static int dm_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
switch (adev->asic_type) {
case CHIP_BONAIRE:
case CHIP_HAWAII:
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
case CHIP_KAVERI:
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 7;
break;
case CHIP_KABINI:
case CHIP_MULLINS:
adev->mode_info.num_crtc = 2;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
case CHIP_FIJI:
case CHIP_TONGA:
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 7;
break;
case CHIP_CARRIZO:
adev->mode_info.num_crtc = 3;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 9;
break;
case CHIP_STONEY:
adev->mode_info.num_crtc = 2;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 9;
break;
case CHIP_POLARIS11:
case CHIP_POLARIS12:
adev->mode_info.num_crtc = 5;
adev->mode_info.num_hpd = 5;
adev->mode_info.num_dig = 5;
break;
case CHIP_POLARIS10:
case CHIP_VEGAM:
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case CHIP_RAVEN:
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
adev->mode_info.num_dig = 4;
break;
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN2_0)
case CHIP_NAVI10:
case CHIP_NAVI12:
adev->mode_info.num_crtc = 6;
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
case CHIP_NAVI14:
adev->mode_info.num_crtc = 5;
adev->mode_info.num_hpd = 5;
adev->mode_info.num_dig = 5;
break;
#endif
#if defined(CONFIG_DRM_AMD_DC_DCN2_1)
case CHIP_RENOIR:
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
adev->mode_info.num_dig = 4;
break;
#endif
default:
DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
return -EINVAL;
}
amdgpu_dm_set_irq_funcs(adev);
if (adev->mode_info.funcs == NULL)
adev->mode_info.funcs = &dm_display_funcs;
/*
* Note: Do NOT change adev->audio_endpt_rreg and
* adev->audio_endpt_wreg because they are initialised in
* amdgpu_device_init()
*/
#if defined(CONFIG_DEBUG_KERNEL_DC)
device_create_file(
adev->ddev->dev,
&dev_attr_s3_debug);
#endif
return 0;
}
static bool modeset_required(struct drm_crtc_state *crtc_state,
struct dc_stream_state *new_stream,
struct dc_stream_state *old_stream)
{
if (!drm_atomic_crtc_needs_modeset(crtc_state))
return false;
if (!crtc_state->enable)
return false;
return crtc_state->active;
}
static bool modereset_required(struct drm_crtc_state *crtc_state)
{
if (!drm_atomic_crtc_needs_modeset(crtc_state))
return false;
return !crtc_state->enable || !crtc_state->active;
}
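/*
* Taken together: for a CRTC that needs a modeset, modeset_required()
* is true when the CRTC ends up enabled and active (the stream must be
* (re)built), while modereset_required() is true when it ends up
* disabled or inactive (the stream should be torn down).
*/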
static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
kfree(encoder);
}
static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
.destroy = amdgpu_dm_encoder_destroy,
};
static int fill_dc_scaling_info(const struct drm_plane_state *state,
struct dc_scaling_info *scaling_info)
{
int scale_w, scale_h;
memset(scaling_info, 0, sizeof(*scaling_info));
/* Source is fixed 16.16 but we ignore mantissa for now... */
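/* For example, src_w = 1920 << 16 yields src_rect.width = 1920. */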
scaling_info->src_rect.x = state->src_x >> 16;
scaling_info->src_rect.y = state->src_y >> 16;
scaling_info->src_rect.width = state->src_w >> 16;
if (scaling_info->src_rect.width == 0)
return -EINVAL;
scaling_info->src_rect.height = state->src_h >> 16;
if (scaling_info->src_rect.height == 0)
return -EINVAL;
scaling_info->dst_rect.x = state->crtc_x;
scaling_info->dst_rect.y = state->crtc_y;
if (state->crtc_w == 0)
return -EINVAL;
scaling_info->dst_rect.width = state->crtc_w;
if (state->crtc_h == 0)
return -EINVAL;
scaling_info->dst_rect.height = state->crtc_h;
/* DRM doesn't specify clipping on destination output. */
scaling_info->clip_rect = scaling_info->dst_rect;
/* TODO: Validate scaling per-format with DC plane caps */
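/*
* The ratios below are in units of 1/1000, so the 250..16000 window
* rejects anything beyond a 4:1 downscale or a 16:1 upscale.
*/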
scale_w = scaling_info->dst_rect.width * 1000 /
scaling_info->src_rect.width;
if (scale_w < 250 || scale_w > 16000)
return -EINVAL;
scale_h = scaling_info->dst_rect.height * 1000 /
scaling_info->src_rect.height;
if (scale_h < 250 || scale_h > 16000)
return -EINVAL;
/*
* The "scaling_quality" can be ignored for now; leaving quality at 0
* has DC assume reasonable defaults based on the format.
*/
return 0;
}
static int get_fb_info(const struct amdgpu_framebuffer *amdgpu_fb,
uint64_t *tiling_flags)
{
struct amdgpu_bo *rbo = gem_to_amdgpu_bo(amdgpu_fb->base.obj[0]);
int r = amdgpu_bo_reserve(rbo, false);
if (unlikely(r)) {
/* Don't show error message when returning -ERESTARTSYS */
if (r != -ERESTARTSYS)
DRM_ERROR("Unable to reserve buffer: %d\n", r);
return r;
}
if (tiling_flags)
amdgpu_bo_get_tiling_flags(rbo, tiling_flags);
amdgpu_bo_unreserve(rbo);
return r;
}
static inline uint64_t get_dcc_address(uint64_t address, uint64_t tiling_flags)
{
uint32_t offset = AMDGPU_TILING_GET(tiling_flags, DCC_OFFSET_256B);
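/*
* For example, a DCC_OFFSET_256B field of 0x40 places the DCC metadata
* 0x40 * 256 = 0x4000 bytes past the surface base address; a zero
* field means the buffer carries no DCC metadata.
*/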
return offset ? (address + offset * 256) : 0;
}
static int
fill_plane_dcc_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const struct plane_size *plane_size,
const union dc_tiling_info *tiling_info,
const uint64_t info,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address)
{
struct dc *dc = adev->dm.dc;
struct dc_dcc_surface_param input;
struct dc_surface_dcc_cap output;
uint32_t offset = AMDGPU_TILING_GET(info, DCC_OFFSET_256B);
uint32_t i64b = AMDGPU_TILING_GET(info, DCC_INDEPENDENT_64B) != 0;
uint64_t dcc_address;
memset(&input, 0, sizeof(input));
memset(&output, 0, sizeof(output));
if (!offset)
return 0;
if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
return 0;
if (!dc->cap_funcs.get_dcc_compression_cap)
return -EINVAL;
input.format = format;
input.surface_size.width = plane_size->surface_size.width;
input.surface_size.height = plane_size->surface_size.height;
input.swizzle_mode = tiling_info->gfx9.swizzle;
if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
input.scan = SCAN_DIRECTION_HORIZONTAL;
else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
input.scan = SCAN_DIRECTION_VERTICAL;
if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
return -EINVAL;
if (!output.capable)
return -EINVAL;
if (i64b == 0 && output.grph.rgb.independent_64b_blks != 0)
return -EINVAL;
dcc->enable = 1;
dcc->meta_pitch =
AMDGPU_TILING_GET(info, DCC_PITCH_MAX) + 1;
dcc->independent_64b_blks = i64b;
dcc_address = get_dcc_address(afb->address, info);
address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
return 0;
}
static int
fill_plane_buffer_attributes(struct amdgpu_device *adev,
const struct amdgpu_framebuffer *afb,
const enum surface_pixel_format format,
const enum dc_rotation_angle rotation,
const uint64_t tiling_flags,
union dc_tiling_info *tiling_info,
struct plane_size *plane_size,
struct dc_plane_dcc_param *dcc,
struct dc_plane_address *address)
{
const struct drm_framebuffer *fb = &afb->base;
int ret;
memset(tiling_info, 0, sizeof(*tiling_info));
memset(plane_size, 0, sizeof(*plane_size));
memset(dcc, 0, sizeof(*dcc));
memset(address, 0, sizeof(*address));
if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
plane_size->surface_size.x = 0;
plane_size->surface_size.y = 0;
plane_size->surface_size.width = fb->width;
plane_size->surface_size.height = fb->height;
plane_size->surface_pitch =
fb->pitches[0] / fb->format->cpp[0];
address->type = PLN_ADDR_TYPE_GRAPHICS;
address->grph.addr.low_part = lower_32_bits(afb->address);
address->grph.addr.high_part = upper_32_bits(afb->address);
} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
uint64_t chroma_addr = afb->address + fb->offsets[1];
plane_size->surface_size.x = 0;
plane_size->surface_size.y = 0;
plane_size->surface_size.width = fb->width;
plane_size->surface_size.height = fb->height;
plane_size->surface_pitch =
fb->pitches[0] / fb->format->cpp[0];
plane_size->chroma_size.x = 0;
plane_size->chroma_size.y = 0;
/* TODO: set these based on surface format */
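/* The division by 2 below assumes 4:2:0 subsampling, e.g. NV12. */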
plane_size->chroma_size.width = fb->width / 2;
plane_size->chroma_size.height = fb->height / 2;
plane_size->chroma_pitch =
fb->pitches[1] / fb->format->cpp[1];
address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
address->video_progressive.luma_addr.low_part =
lower_32_bits(afb->address);
address->video_progressive.luma_addr.high_part =
upper_32_bits(afb->address);
address->video_progressive.chroma_addr.low_part =
lower_32_bits(chroma_addr);
address->video_progressive.chroma_addr.high_part =
upper_32_bits(chroma_addr);
}
/* Fill GFX8 params */
if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
/* XXX fix me for VI */
tiling_info->gfx8.num_banks = num_banks;
tiling_info->gfx8.array_mode =
DC_ARRAY_2D_TILED_THIN1;
tiling_info->gfx8.tile_split = tile_split;
tiling_info->gfx8.bank_width = bankw;
tiling_info->gfx8.bank_height = bankh;
tiling_info->gfx8.tile_aspect